Gender Classification - VGG model
I am using the code below for classifying human gender (M vs F). However, it is overfitting, and validation accuracy does not even reach 90%. I need your suggestions on this.
import math
import numpy as np
from keras import applications
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.preprocessing.image import ImageDataGenerator

img_width, img_height = 128, 128
top_model_weights_path = 'bottleneck_fc_model.h5'
train_data_dir = 'Train'
validation_data_dir = 'Test'
nb_train_samples = 30000
nb_validation_samples = 7000
epochs = 150
batch_size = 128

def save_bottleneck_features():
    datagen = ImageDataGenerator(rescale=1. / 255)
    # Build the VGG16 network without its fully connected top
    model = applications.VGG16(include_top=False, weights='imagenet')

    generator = datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=None,   # images only, no labels
        shuffle=False)     # keep alphabetical order so labels can be rebuilt later
    predict_size_train = int(math.ceil(nb_train_samples / batch_size))
    bottleneck_features_train = model.predict_generator(generator, predict_size_train)
    np.save('bottleneck_features_train.npy', bottleneck_features_train)

    generator = datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False)
    predict_size_validation = int(math.ceil(nb_validation_samples / batch_size))
    bottleneck_features_validation = model.predict_generator(generator, predict_size_validation)
    np.save('bottleneck_features_validation.npy', bottleneck_features_validation)

def train_top_model():
    train_data = np.load('bottleneck_features_train.npy')
    # First half of the (unshuffled) images is class 0, second half class 1
    train_labels = np.array(
        [0] * (nb_train_samples // 2) + [1] * (nb_train_samples // 2))
    validation_data = np.load('bottleneck_features_validation.npy')
    validation_labels = np.array(
        [0] * (nb_validation_samples // 2) + [1] * (nb_validation_samples // 2))

    model = Sequential()
    model.add(Flatten(input_shape=train_data.shape[1:]))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    model.fit(train_data, train_labels,
              epochs=epochs,
              batch_size=batch_size,
              validation_data=(validation_data, validation_labels))
    model.save_weights(top_model_weights_path)

save_bottleneck_features()
train_top_model()
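One detail worth checking in this pipeline: 30000 and 7000 are not multiples of 128, so with Keras 2's steps semantics the last generator step wraps around and the saved feature arrays can end up longer than the label arrays built in train_top_model. A minimal sanity check (a sketch, assuming the .npy files saved above):

import numpy as np

nb_train_samples = 30000
train_data = np.load('bottleneck_features_train.npy')
train_labels = np.array(
    [0] * (nb_train_samples // 2) + [1] * (nb_train_samples // 2))

# ceil(30000 / 128) = 235 steps * 128 images/step = 30080 rows vs 30000 labels
print(train_data.shape[0], train_labels.shape[0])
train_data = train_data[:nb_train_samples]  # drop any wrapped-around extras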
Here are some of the last epochs:
Epoch 130/150
loss: 0.0337 - acc: 0.9902 - val_loss: 1.1683 - val_acc: 0.8356
Epoch 131/150
loss: 0.0307 - acc: 0.9919 - val_loss: 1.0721 - val_acc: 0.8345
Epoch 132/150
loss: 0.0313 - acc: 0.9914 - val_loss: 1.1606 - val_acc: 0.8342
Epoch 133/150
loss: 0.0316 - acc: 0.9914 - val_loss: 1.1487 - val_acc: 0.8347
Epoch 134/150
loss: 0.0311 - acc: 0.9909 - val_loss: 1.1363 - val_acc: 0.8356
Epoch 135/150
loss: 0.0295 - acc: 0.9914 - val_loss: 1.2289 - val_acc: 0.8355
Epoch 136/150
loss: 0.0325 - acc: 0.9912 - val_loss: 1.1787 - val_acc: 0.8345
Epoch 137/150
loss: 0.0276 - acc: 0.9922 - val_loss: 1.2281 - val_acc: 0.8337
Epoch 138/150
loss: 0.0314 - acc: 0.9918 - val_loss: 1.1973 - val_acc: 0.8352
Epoch 139/150
loss: 0.0298 - acc: 0.9913 - val_loss: 1.1551 - val_acc: 0.8311
Epoch 140/150
loss: 0.0301 - acc: 0.9919 - val_loss: 1.2301 - val_acc: 0.8339
Epoch 141/150
loss: 0.0315 - acc: 0.9917 - val_loss: 1.1344 - val_acc: 0.8328
Epoch 142/150
loss: 0.0290 - acc: 0.9918 - val_loss: 1.2094 - val_acc: 0.8286
Epoch 143/150
loss: 0.0292 - acc: 0.9919 - val_loss: 1.1449 - val_acc: 0.8358
Epoch 144/150
loss: 0.0284 - acc: 0.9925 - val_loss: 1.2666 - val_acc: 0.8267
Epoch 145/150
loss: 0.0328 - acc: 0.9913 - val_loss: 1.1720 - val_acc: 0.8331
Epoch 146/150
loss: 0.0270 - acc: 0.9928 - val_loss: 1.2077 - val_acc: 0.8355
Epoch 147/150
loss: 0.0338 - acc: 0.9907 - val_loss: 1.2715 - val_acc: 0.8313
Epoch 148/150
loss: 0.0276 - acc: 0.9923 - val_loss: 1.3014 - val_acc: 0.8223
Epoch 149/150
loss: 0.0290 - acc: 0.9923 - val_loss: 1.2123 - val_acc: 0.8291
Epoch 150/150
loss: 0.0317 - acc: 0.9920 - val_loss: 1.2682 - val_acc: 0.8277
It's clearly overfitting and needs more data. However, for Cats vs Dogs with 10K images this same code works, and validation accuracy goes above 90% within 4-5 epochs. I need help with this.
keras neural-network deep-learning computer-vision conv-neural-network
asked Nov 11 at 10:34 by steveJ
Combating overfitting is an open research field. Nevertheless, there is an enormous amount of material available on how to prevent overfitting with a simple Google search, which is just as likely to help you as any answer here, given that no training images/data are provided and this is not reproducible.
– Thomas Pinetz, Nov 11 at 13:26

Which layers are trainable?
– N.IT, Nov 12 at 6:27
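For reference, in this bottleneck setup the VGG16 base is used only for inference (its features are precomputed), so effectively only the top model's layers are trained. A quick way to inspect trainability in any Keras model (a minimal sketch):

from keras import applications

model = applications.VGG16(include_top=False, weights='imagenet')
for layer in model.layers:
    print(layer.name, layer.trainable)
# All layers report trainable=True by default, but since the base is only
# ever used via predict_generator here, none of its weights are updated.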
1 Answer
Try these suggestions (tune until you reach the desired result); a sketch combining them follows after this answer:
- Change the optimizer (try Adam).
- Lower the learning rate.
- Add L2 regularization.
- Add a batch normalization layer.
- Make the layers after Flatten match VGG16's original fully connected top.
answered Nov 12 at 6:32 by N.IT
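A minimal sketch folding the answer's suggestions into the top model. The hyperparameter values (L2 coefficient, learning rate, 4096 units) are illustrative assumptions, not from the answer, and need tuning; train_data is the bottleneck array from the question:

import numpy as np
from keras.models import Sequential
from keras.layers import Flatten, Dense, Dropout, BatchNormalization
from keras.regularizers import l2
from keras.optimizers import Adam

train_data = np.load('bottleneck_features_train.npy')

model = Sequential()
model.add(Flatten(input_shape=train_data.shape[1:]))
# VGG16-style top: two large dense layers instead of a single 512-unit layer
model.add(Dense(4096, activation='relu', kernel_regularizer=l2(1e-4)))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu', kernel_regularizer=l2(1e-4)))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer=Adam(lr=1e-5),  # Adam with a small learning rate
              loss='binary_crossentropy',
              metrics=['accuracy'])

Putting BatchNormalization before Dropout and L2 on the dense kernels is one common ordering; the exact coefficients are tuning knobs, as the answer says.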