# TwoBranchCNN1D.py  (file-listing artifact from a paste; commented out so the module parses)
# two-branch 1D CNN model (not a ResNet: plain conv stacks, no residual connections)
# when tuning start with learning rate->mini_batch_size ->
# momentum-> #hidden_units -> # learning_rate_decay -> #layers
import tensorflow.keras as tfk
import numpy as np
import time

#import matplotlib
#from utils.utils import save_test_duration

#matplotlib.use('agg')
#import matplotlib.pyplot as plt

#from utils.utils import save_logs
#from utils.utils import calculate_metrics


class TwoBranchCNN1D:
    """Two-branch 1D CNN classifier.

    Each of the two inputs is processed by its own four-layer
    Conv1D/BatchNorm/Dropout stack; the branch outputs are concatenated
    on the channel axis and fed through two Dense(512) layers into a
    softmax head with ``nb_classes`` units.
    """

    def getBranch(self, input_layer, n_filters, dropOut, suffix):
        """Build one convolutional branch on top of ``input_layer``.

        Args:
            input_layer: Keras tensor the branch is built on.
            n_filters: base filter count (the last two convs use 2x this).
            dropOut: dropout rate applied after every conv block.
            suffix: string appended to layer names so the two branches
                get unique layer names.

        Returns:
            The branch's output tensor (3-D: batch, steps, channels).
        """
        # BUG FIX: the original body referenced the undefined name
        # `n_feature_maps`; the parameter is `n_filters`.
        conv_x = tfk.layers.Conv1D(filters=n_filters, kernel_size=5, padding='valid',
                                   name="conv1_" + suffix, activation="relu")(input_layer)
        conv_x = tfk.layers.BatchNormalization(name="bn1_" + suffix)(conv_x)
        conv_x = tfk.layers.Dropout(dropOut, name="dropOut1_" + suffix)(conv_x)

        conv_x = tfk.layers.Conv1D(filters=n_filters, kernel_size=3, padding='valid',
                                   name="conv2_" + suffix, activation="relu")(conv_x)
        conv_x = tfk.layers.BatchNormalization(name="bn2_" + suffix)(conv_x)
        conv_x = tfk.layers.Dropout(dropOut, name="dropOut2_" + suffix)(conv_x)

        # Last two blocks double the filter count and shrink the kernel.
        conv_x = tfk.layers.Conv1D(filters=n_filters * 2, kernel_size=3, padding='valid',
                                   name="conv3_" + suffix, activation="relu")(conv_x)
        conv_x = tfk.layers.BatchNormalization(name="bn3_" + suffix)(conv_x)
        conv_x = tfk.layers.Dropout(dropOut, name="dropOut3_" + suffix)(conv_x)

        conv_x = tfk.layers.Conv1D(filters=n_filters * 2, kernel_size=1, padding='valid',
                                   name="conv4_" + suffix, activation="relu")(conv_x)
        conv_x = tfk.layers.BatchNormalization(name="bn4_" + suffix)(conv_x)
        conv_x = tfk.layers.Dropout(dropOut, name="dropOut4_" + suffix)(conv_x)

        return conv_x

    def __init__(self, output_directory, input_shape1, input_shape2, nb_classes,
                 verbose=False, build=True, load_weights=False):
        """Create the classifier and build the underlying Keras model.

        Args:
            output_directory: directory for checkpoints/logs (stored only).
            input_shape1: shape (excluding batch dim) of the first input.
            input_shape2: shape (excluding batch dim) of the second input.
            nb_classes: number of output classes.
            verbose: verbosity flag forwarded to ``model.fit``.
            build: accepted for interface compatibility; the model is
                always built (original behavior).
            load_weights: accepted for interface compatibility; unused.
        """
        self.output_directory = output_directory
        self.verbose = verbose
        # BUG FIX: build_model() populates self.callbacks; the original
        # code reset it to None *after* building, wiping the callbacks.
        self.callbacks = None
        self.model = self.build_model(input_shape1, input_shape2, nb_classes)
        # BUG FIX: removed `return self.model` — __init__ must return None
        # (the original raised "TypeError: __init__() should return None").

    def build_model(self, input_shape1, input_shape2, nb_classes):
        """Assemble and compile the two-branch functional model.

        Also installs a ReduceLROnPlateau callback on ``self.callbacks``.

        Returns:
            The compiled ``tfk.models.Model``.
        """
        n_feature_maps = 128
        dropOut = 0.2

        input_layer1 = tfk.layers.Input(name="input1", shape=input_shape1)
        input_layer2 = tfk.layers.Input(name="input2", shape=input_shape2)

        features1 = self.getBranch(input_layer1, n_feature_maps, dropOut, "input1")
        features2 = self.getBranch(input_layer2, n_feature_maps, dropOut, "input2")

        # BUG FIX: `tfk.Concatenation` does not exist; the layer is
        # `tfk.layers.Concatenate` (default axis=-1, i.e. channels).
        # NOTE(review): this requires both branches to produce the same
        # number of time steps — confirm input_shape1/input_shape2 agree.
        features = tfk.layers.Concatenate(name="concat_layer")([features1, features2])
        # NOTE(review): the branch outputs are 3-D, so these Dense layers
        # apply per time step and the softmax output is (batch, steps,
        # nb_classes). A Flatten/GlobalAveragePooling1D before the dense
        # head may have been intended — confirm before changing.
        dense_layer = tfk.layers.Dense(512, activation='relu')(features)
        dense_layer = tfk.layers.Dense(512, activation='relu')(dense_layer)
        output_layer = tfk.layers.Dense(nb_classes, activation='softmax')(dense_layer)

        model = tfk.models.Model(inputs=[input_layer1, input_layer2], outputs=output_layer)

        model.compile(loss='categorical_crossentropy', optimizer=tfk.optimizers.Adam(),
                      metrics=['accuracy'])

        # BUG FIX: the original used the unimported name `keras`; the
        # module is imported as `tfk`.
        reduce_lr = tfk.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.5,
                                                    patience=50, min_lr=0.0001)

        #file_path = self.output_directory + 'best_model.hdf5'

        #model_checkpoint = tfk.callbacks.ModelCheckpoint(filepath=file_path, monitor='loss',
        #                                                 save_best_only=True)

        self.callbacks = [reduce_lr]#, model_checkpoint]

        return model#, encoder

    def fit(self, x_train1, x_train2, y_train, batch_size, nb_epochs):
        """Train the model on the two input arrays.

        Restored from the commented-out original with two fixes: the
        undefined name `x_train` is now `x_train1`, and `verbose` comes
        from the flag stored in __init__.

        Returns:
            The Keras ``History`` object from ``model.fit``.
        """
        # Cap the batch size at one tenth of the training set.
        mini_batch_size = int(min(x_train1.shape[0] / 10, batch_size))
        start_time = time.time()

        hist = self.model.fit([x_train1, x_train2], y_train,
                              batch_size=mini_batch_size, epochs=nb_epochs,
                              verbose=self.verbose, callbacks=self.callbacks)

        self.duration = time.time() - start_time
        return hist

    def predict(self, x_test1, x_test2):
        """Return the model's class-probability predictions.

        Restored from the commented-out original with a fix: the bare
        name `model` is now `self.model`.
        """
        y_pred = self.model.predict([x_test1, x_test2])
        return y_pred