Hot questions for using neural networks in VGG nets

Question:

There is an example of VGG16 fine-tuning on the Keras blog, but I can't reproduce it.

More precisely, here is the code used to initialize VGG16 without the top layer and to freeze all blocks except the topmost:

WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file('vgg16_weights.h5', WEIGHTS_PATH_NO_TOP)

model = Sequential()
model.add(InputLayer(input_shape=(150, 150, 3)))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1'))
model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2'))
model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block5_maxpool'))

model.load_weights(weights_path)

for layer in model.layers:
    layer.trainable = False

for layer in model.layers[-4:]:
    layer.trainable = True
    print("Layer '%s' is trainable" % layer.name)  

Next, create a top model with a single hidden layer:

top_model = Sequential()
top_model.add(Flatten(input_shape=model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(1, activation='sigmoid'))
top_model.load_weights('top_model.h5')

Note that it was previously trained on bottleneck features, as described in the blog post. Next, add this top model to the base model and compile:

model.add(top_model)
model.compile(loss='binary_crossentropy',
              optimizer=SGD(lr=1e-4, momentum=0.9),
              metrics=['accuracy'])

And finally, fit on the cats/dogs data:

batch_size = 16

train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1./255)

train_gen = train_datagen.flow_from_directory(
    TRAIN_DIR,
    target_size=(150, 150),
    batch_size=batch_size,
    class_mode='binary')

valid_gen = test_datagen.flow_from_directory(
    VALID_DIR,
    target_size=(150, 150),
    batch_size=batch_size,
    class_mode='binary')

model.fit_generator(
    train_gen,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=nb_epoch,
    validation_data=valid_gen,
    validation_steps=nb_valid_samples // batch_size)

But here is an error I am getting when trying to fit:

ValueError: Error when checking model target: expected block5_maxpool to have 4 dimensions, but got array with shape (16, 1)

Therefore, it seems that something is wrong with the last pooling layer in the base model, or perhaps I've done something wrong trying to connect the base model with the top one.

Has anybody run into a similar issue? Or maybe there is a better way to build such "concatenated" models? I am using keras==2.0.0 with the Theano backend.

Note: I was using examples from a gist and the applications.VGG16 utility, but had issues trying to concatenate the models, and I am not too familiar with the Keras functional API. So the solution I provide here is the most "successful" one, i.e. it fails only at the fitting stage.


Update #1

OK, here is a short explanation of what I am trying to do. First of all, I generate bottleneck features from VGG16 as follows:

def save_bottleneck_features():
    datagen = ImageDataGenerator(rescale=1./255)
    model = applications.VGG16(include_top=False, weights='imagenet')

    generator = datagen.flow_from_directory(
        TRAIN_DIR,
        target_size=(150, 150),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False)    
    print("Predicting train samples..")
    # Keras 2: the second argument is the number of steps (batches), not samples
    bottleneck_features_train = model.predict_generator(generator, nb_train_samples // batch_size)
    np.save(open('bottleneck_features_train.npy', 'wb'), bottleneck_features_train)

    generator = datagen.flow_from_directory(
        VALID_DIR,
        target_size=(150, 150),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False)
    print("Predicting valid samples..")
    bottleneck_features_valid = model.predict_generator(generator, nb_valid_samples // batch_size)
    np.save(open('bottleneck_features_valid.npy', 'wb'), bottleneck_features_valid)

Then, I create a top model and train it on these features as follows:

def train_top_model():
    train_data = np.load(open('bottleneck_features_train.npy', 'rb'))
    train_labels = np.array([0]*(nb_train_samples // 2) +
                            [1]*(nb_train_samples // 2))
    valid_data = np.load(open('bottleneck_features_valid.npy', 'rb'))
    valid_labels = np.array([0]*(nb_valid_samples // 2) +
                            [1]*(nb_valid_samples // 2))
    model = Sequential()
    model.add(Flatten(input_shape=train_data.shape[1:]))  
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
    model.fit(train_data, train_labels,
              epochs=nb_epoch,
              batch_size=batch_size,
              validation_data=(valid_data, valid_labels),
              verbose=1)
    model.save_weights('top_model.h5')   

So basically, there are two trained models: base_model with ImageNet weights and top_model with weights trained on the bottleneck features. I wonder how to concatenate them. Is it possible, or am I doing something wrong? Because, as far as I can see, the response from @thomas-pinetz assumes that the top model is not trained separately but is appended to the model right away. I'm not sure if I am being clear; here is a quote from the blog:

In order to perform fine-tuning, all layers should start with properly trained weights: for instance you should not slap a randomly initialized fully-connected network on top of a pre-trained convolutional base. This is because the large gradient updates triggered by the randomly initialized weights would wreck the learned weights in the convolutional base. In our case this is why we first train the top-level classifier, and only then start fine-tuning convolutional weights alongside it.


Answer:

I think that the pre-trained VGG weights do not fit your model's architecture, and the error stems from this mismatch. In any case, there is a much better way to do this, using the built-in network itself, as described at https://keras.io/applications/#vgg16.

You can just use:

base_model = keras.applications.vgg16.VGG16(include_top=False, weights='imagenet', input_tensor=None, input_shape=None)

to instantiate a pre-trained VGG net. Then you can freeze its layers (a sketch of the freezing step follows the snippet below) and use the Model class to instantiate your own model like this:

x = base_model.output
x = Flatten()(x)
x = Dense(your_classes, activation='softmax')(x)
new_model = Model(inputs=base_model.input, outputs=x)
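
For the freezing step, a minimal sketch (this freezes the whole convolutional base; unfreeze the last block instead if you want to fine-tune it, as in the question):

for layer in base_model.layers:
    layer.trainable = False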

To combine the bottom and the top networks you can use the following code snippet. It relies on the Input layer (https://keras.io/getting-started/functional-api-guide/), on load_model (https://keras.io/getting-started/faq/#how-can-i-save-a-keras-model), and on the functional API of Keras:

final_input = Input(shape=(3, 224, 224))
base_model = vgg...
top_model = load_model(weights_file)

x = base_model(final_input)
result = top_model(x)
final_model = Model(inputs=final_input, outputs=result)
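
After combining, final_model is compiled and trained like any other Keras model. A sketch reusing the compile settings and generators from the question (SGD from keras.optimizers is assumed to be imported):

final_model.compile(loss='binary_crossentropy',
                    optimizer=SGD(lr=1e-4, momentum=0.9),
                    metrics=['accuracy'])
final_model.fit_generator(
    train_gen,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=nb_epoch,
    validation_data=valid_gen,
    validation_steps=nb_valid_samples // batch_size)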

Question:

I'm replicating the steps in http://caffe.berkeleyvision.org/gathered/examples/finetune_flickr_style.html

I want to change the network to the VGG model, which can be obtained at http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_16_layers.caffemodel

Does it suffice to simply substitute the file passed via the -weights parameter, as follows?

./build/tools/caffe train -solver models/finetune_flickr_style/solver.prototxt -weights VGG_ILSVRC_16_layers.caffemodel -gpu 0

Or do I need to adjust learning rates and iterations, i.e. does the model come with separate prototxt files?


Answer:

There needs to be a 1-1 correspondence between the weights of the network you want to train and the weights you use for initializing/fine-tuning. The architectures of the old and new models have to match.

VGG-16 has a different architecture than the model described by models/finetune_flickr_style/train_val.prototxt (FlickrStyleCaffeNet). This is the network that the solver will try to optimize. Even if it doesn't crash, the weights you've loaded don't have any meaning in the new network.

The VGG-16 network is described in its deploy.prototxt file, linked from its entry in Caffe's Model Zoo.
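
If in doubt, you can inspect which layer names and parameter shapes the caffemodel actually carries and compare them with your target train_val.prototxt; fine-tuning only reuses weights for layers whose names and shapes match. A minimal pycaffe sketch (the file paths are assumptions, substitute your local copies):

import caffe

# Load VGG-16 together with its matching deploy definition.
net = caffe.Net('VGG_ILSVRC_16_layers_deploy.prototxt',
                'VGG_ILSVRC_16_layers.caffemodel', caffe.TEST)

# Print every parameterized layer and its weight/bias shapes.
for name, params in net.params.items():
    print(name, [p.data.shape for p in params])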

Question:

I want to train VGG on 128x128 images. I don't want to rescale them to 224x224, in order to save GPU memory and training time. What would be the proper way to do so?


Answer:

The best way is to keep the convolutional part as it is and replace the fully connected layers. This way it is even possible to use pretrained weights for the convolutional part of the network. The fully connected layers must be randomly initialized, because their input size depends on the spatial size of the final feature maps, which changes with the input resolution. This way one can fine-tune a network with a smaller input size.

Here is some PyTorch code:

import torch
from torch.autograd import Variable
import torchvision
import torch.nn as nn

VGG_TYPES = {'vgg11' : torchvision.models.vgg11, 
             'vgg11_bn' : torchvision.models.vgg11_bn, 
             'vgg13' : torchvision.models.vgg13, 
             'vgg13_bn' : torchvision.models.vgg13_bn, 
             'vgg16' : torchvision.models.vgg16, 
             'vgg16_bn' : torchvision.models.vgg16_bn,
             'vgg19_bn' : torchvision.models.vgg19_bn, 
             'vgg19' : torchvision.models.vgg19}


class Custom_VGG(nn.Module):

    def __init__(self,
                 ipt_size=(128, 128), 
                 pretrained=True, 
                 vgg_type='vgg19_bn', 
                 num_classes=1000):
        super(Custom_VGG, self).__init__()

        # load convolutional part of vgg
        assert vgg_type in VGG_TYPES, "Unknown vgg_type '{}'".format(vgg_type)
        vgg_loader = VGG_TYPES[vgg_type]
        vgg = vgg_loader(pretrained=pretrained)
        self.features = vgg.features

        # init fully connected part of vgg
        test_ipt = Variable(torch.zeros(1,3,ipt_size[0],ipt_size[1]))
        test_out = vgg.features(test_ipt)
        self.n_features = test_out.size(1) * test_out.size(2) * test_out.size(3)
        self.classifier = nn.Sequential(nn.Linear(self.n_features, 4096),
                                        nn.ReLU(True),
                                        nn.Dropout(),
                                        nn.Linear(4096, 4096),
                                        nn.ReLU(True),
                                        nn.Dropout(),
                                        nn.Linear(4096, num_classes)
                                       )
        self._init_classifier_weights()

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x

    def _init_classifier_weights(self):
        for m in self.classifier:
            if isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

To create a VGG, just call:

vgg = Custom_VGG(ipt_size=(128, 128), pretrained=True)
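
As a quick sanity check (a hypothetical forward pass on a random batch), the network should now accept 128x128 inputs end to end:

x = Variable(torch.randn(2, 3, 128, 128))  # two random RGB images
out = vgg(x)
print(out.size())  # torch.Size([2, 1000]) for the default num_classes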