# TwoBranchCNN1D.py
# Two-branch 1D CNN model (one convolutional branch per input).
# When tuning, start with learning rate -> mini-batch size ->
# momentum -> hidden units -> learning-rate decay -> number of layers.
from tensorflow import keras as tfk
import numpy as np
import time


def getBranch(input_layer, n_filters, dropOut, suffix):
    # One convolutional branch: four Conv1D blocks, each followed by
    # batch normalization and dropout. The last two blocks double the filters.
    conv_x = tfk.layers.Conv1D(filters=n_filters, kernel_size=5, padding='valid', name="conv1_"+suffix, activation="relu")(input_layer)
    conv_x = tfk.layers.BatchNormalization(name="bn1_"+suffix)(conv_x)
    conv_x = tfk.layers.Dropout(dropOut, name="dropOut1_"+suffix)(conv_x)

    conv_x = tfk.layers.Conv1D(filters=n_filters, kernel_size=3, padding='valid', name="conv2_"+suffix, activation="relu")(conv_x)
    conv_x = tfk.layers.BatchNormalization(name="bn2_"+suffix)(conv_x)
    conv_x = tfk.layers.Dropout(dropOut, name="dropOut2_"+suffix)(conv_x)

    conv_x = tfk.layers.Conv1D(filters=n_filters*2, kernel_size=3, padding='valid', name="conv3_"+suffix, activation="relu")(conv_x)
    conv_x = tfk.layers.BatchNormalization(name="bn3_"+suffix)(conv_x)
    conv_x = tfk.layers.Dropout(dropOut, name="dropOut3_"+suffix)(conv_x)

    conv_x = tfk.layers.Conv1D(filters=n_filters*2, kernel_size=1, padding='valid', name="conv4_"+suffix, activation="relu")(conv_x)
    conv_x = tfk.layers.BatchNormalization(name="bn4_"+suffix)(conv_x)
    conv_x = tfk.layers.Dropout(dropOut, name="dropOut4_"+suffix)(conv_x)

    return conv_x
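
# Shape note (illustrative, assuming a single-channel input of 100 time steps):
# the four 'valid' convolutions shrink the time axis from 100 to 92 and the
# last two blocks double the channel count, e.g.
#   inp = tfk.layers.Input(shape=(100, 1))
#   out = getBranch(inp, 64, 0.2, "demo")   # -> shape (None, 92, 128)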


def build_model(input_shape1, input_shape2, nb_classes):
    n_feature_maps = 128
    dropOut = 0.2

    input_layer1 = tfk.layers.Input(name="input1", shape=input_shape1)
    input_layer2 = tfk.layers.Input(name="input2", shape=input_shape2)

    features1 = getBranch(input_layer1, n_feature_maps, dropOut, "input1")
    features2 = getBranch(input_layer2, n_feature_maps, dropOut, "input2")

    # NOTE: global average pooling is an assumed addition; it collapses the time
    # axis so the two branches can be concatenated even when the inputs have
    # different lengths, and so the softmax head yields one prediction per sample.
    features1 = tfk.layers.GlobalAveragePooling1D(name="gap_input1")(features1)
    features2 = tfk.layers.GlobalAveragePooling1D(name="gap_input2")(features2)

    features = tfk.layers.Concatenate(name="concat_layer")([features1, features2])
    dense_layer = tfk.layers.Dense(512, activation='relu')(features)
    dense_layer = tfk.layers.Dense(512, activation='relu')(dense_layer)
    output_layer = tfk.layers.Dense(nb_classes, activation='softmax')(dense_layer)

    model = tfk.models.Model(inputs=[input_layer1, input_layer2], outputs=output_layer)
    # the model can be compiled inside or outside this method, as you prefer
    #model.compile(loss='categorical_crossentropy', optimizer=tfk.optimizers.Adam(), metrics=['accuracy'])

    return model
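

# Example usage: a minimal sketch (not part of the original module) showing how
# the model could be built, compiled and fed. The input shapes (128 steps x 6
# channels and 64 steps x 3 channels), the 4 classes and the random data below
# are illustrative assumptions only.
if __name__ == "__main__":
    model = build_model(input_shape1=(128, 6), input_shape2=(64, 3), nb_classes=4)
    model.compile(loss='categorical_crossentropy',
                  optimizer=tfk.optimizers.Adam(),
                  metrics=['accuracy'])
    model.summary()

    # random placeholder data, just to illustrate the expected shapes
    x1 = np.random.rand(32, 128, 6)
    x2 = np.random.rand(32, 64, 3)
    y = tfk.utils.to_categorical(np.random.randint(0, 4, size=32), num_classes=4)
    model.fit([x1, x2], y, epochs=1, batch_size=8)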