Commit 511eefe8 authored by Ienco Dino

Initial commit

Showing with 165 additions and 0 deletions
BaseModels.py 0 → 100644
import tensorflow as tf
import tensorflow.keras as tfk


class CNN1D(tf.keras.Model):
    """1D convolutional feature extractor: four Conv1D blocks
    (convolution -> batch normalization -> dropout), then global average pooling."""

    def __init__(self, n_filters, suffix, dropout_rate=0.0, hidden_activation='relu', name='CNN1D', **kwargs):
        super(CNN1D, self).__init__(name=name, **kwargs)
        self.conv1 = tfk.layers.Conv1D(filters=n_filters, kernel_size=3, padding='valid', activation=hidden_activation, name="conv1_" + suffix)
        self.bn1 = tfk.layers.BatchNormalization(name="bn1_" + suffix)
        self.do1 = tfk.layers.Dropout(rate=dropout_rate, name="dropOut1_" + suffix)
        self.conv2 = tfk.layers.Conv1D(filters=n_filters, kernel_size=3, padding='valid', activation=hidden_activation, name="conv2_" + suffix)
        self.bn2 = tfk.layers.BatchNormalization(name="bn2_" + suffix)
        self.do2 = tfk.layers.Dropout(rate=dropout_rate, name="dropOut2_" + suffix)
        self.conv3 = tfk.layers.Conv1D(filters=n_filters * 2, kernel_size=3, padding='valid', activation=hidden_activation, name="conv3_" + suffix)
        self.bn3 = tfk.layers.BatchNormalization(name="bn3_" + suffix)
        self.do3 = tfk.layers.Dropout(rate=dropout_rate, name="dropOut3_" + suffix)
        self.conv4 = tfk.layers.Conv1D(filters=n_filters * 2, kernel_size=1, padding='valid', activation=hidden_activation, name="conv4_" + suffix)
        self.bn4 = tfk.layers.BatchNormalization(name="bn4_" + suffix)
        self.do4 = tfk.layers.Dropout(rate=dropout_rate, name="dropOut4_" + suffix)
        self.pool = tfk.layers.GlobalAveragePooling1D()

    @tf.function
    def call(self, inputs, training=False):
        # Propagate the training flag to BatchNormalization and Dropout so both
        # switch correctly between training and inference behaviour.
        x = self.conv1(inputs)
        x = self.bn1(x, training=training)
        x = self.do1(x, training=training)
        x = self.conv2(x)
        x = self.bn2(x, training=training)
        x = self.do2(x, training=training)
        x = self.conv3(x)
        x = self.bn3(x, training=training)
        x = self.do3(x, training=training)
        x = self.conv4(x)
        x = self.bn4(x, training=training)
        x = self.do4(x, training=training)
        return self.pool(x)


class TwoBranchModel(tf.keras.Model):
    """Two-branch classifier: one CNN1D branch for the pixel-level series and
    one for the object-level (median) series, fused by concatenation before
    two dense layers and a softmax classifier."""

    def __init__(self, n_filters, suffix, nb_classes, dropout_rate=0.0, hidden_activation='relu', name='TwoBranchModel', **kwargs):
        super(TwoBranchModel, self).__init__(name=name, **kwargs)
        self.PixelBranch = CNN1D(n_filters, suffix, dropout_rate=dropout_rate, hidden_activation=hidden_activation)
        self.ObjBranch = CNN1D(n_filters, suffix, dropout_rate=dropout_rate, hidden_activation=hidden_activation)
        self.dense1 = tfk.layers.Dense(512, activation=hidden_activation)
        self.dense2 = tfk.layers.Dense(512, activation=hidden_activation)
        self.classif = tfk.layers.Dense(nb_classes, activation='softmax')

    @tf.function
    def call(self, inputs, training=False):
        pixel_inputs, obj_inputs = inputs
        branchP = self.PixelBranch(pixel_inputs, training=training)
        branchO = self.ObjBranch(obj_inputs, training=training)
        feat = tf.concat([branchP, branchO], axis=1)
        output = self.dense1(feat)
        output = self.dense2(output)
        return self.classif(output)
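A quick way to sanity-check the architecture is to run it once on random inputs. This is a minimal sketch, not part of the commit; the shapes (8 samples, 24 timestamps, 10 channels) and the class count of 7 are illustrative assumptions:

import numpy as np
from BaseModels import TwoBranchModel

# Hypothetical shapes: 8 samples, 24 timestamps, 10 channels per series.
pixel = np.random.rand(8, 24, 10).astype("float32")
obj = np.random.rand(8, 24, 10).astype("float32")

model = TwoBranchModel(128, "model", nb_classes=7, dropout_rate=0.2)
out = model((pixel, obj), training=False)
print(out.shape)  # (8, 7): one softmax distribution per sample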
launch.sh 0 → 100644
#!/bin/sh
# Train the two-branch model on one data split; $1 is the split identifier.
dir="data_split"
# Make sure the directory that will receive the saved weights exists.
mkdir -p modelTwoBranch
python main.py "$dir/train_X_pxl_$1.npy" "$dir/train_X_median_$1.npy" "$dir/train_y_$1.npy" \
    "$dir/valid_X_pxl_$1.npy" "$dir/valid_X_median_$1.npy" "$dir/valid_y_$1.npy" \
    modelTwoBranch "$1"
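As a usage note, the script takes the split identifier as its only argument. Assuming the splits are numbered and the files for split 0 exist under data_split/, a run would look like:

sh launch.sh 0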
main.py 0 → 100644
import sys
import time

import numpy as np
import tensorflow as tf
from sklearn.metrics import f1_score
from sklearn.utils import shuffle

from BaseModels import TwoBranchModel


def getBatch(X, i, batch_size):
    """Return the i-th mini-batch of X; the last batch may be smaller."""
    start_id = i * batch_size
    end_id = min((i + 1) * batch_size, X.shape[0])
    return X[start_id:end_id]


def trainClassifS2(model, x_train_S2_pixel, x_train_S2_obj, y_train, loss_object, optimizer, BATCH_SIZE):
    """Run one training epoch over the pixel/object pairs and return the average batch loss."""
    tot_loss = 0.0
    iterations = x_train_S2_pixel.shape[0] // BATCH_SIZE
    if x_train_S2_pixel.shape[0] % BATCH_SIZE != 0:
        iterations += 1
    for ibatch in range(iterations):
        batch_x_S2_p = getBatch(x_train_S2_pixel, ibatch, BATCH_SIZE)
        batch_x_S2_obj = getBatch(x_train_S2_obj, ibatch, BATCH_SIZE)
        batch_y = getBatch(y_train, ibatch, BATCH_SIZE)
        with tf.GradientTape() as gen_tape:
            mainEstim = model((batch_x_S2_p, batch_x_S2_obj), training=True)
            loss = loss_object(batch_y, mainEstim)
        grad_of_G = gen_tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grad_of_G, model.trainable_variables))
        tot_loss += loss
    return tot_loss / iterations


# Command-line arguments (see launch.sh): training pixel/object series and
# labels, validation pixel/object series and labels, output directory for
# model weights, and the split identifier.
ts_train_S2_pixel = np.load(sys.argv[1])
ts_train_S2_obj = np.load(sys.argv[2])
label_train = np.load(sys.argv[3])
label_train = label_train - 1  # labels are 1-based on disk; shift to 0-based
ts_valid_S2_pixel = np.load(sys.argv[4])
ts_valid_S2_obj = np.load(sys.argv[5])
label_valid = np.load(sys.argv[6])
label_valid = label_valid - 1
output_dir_models = sys.argv[7]
split_id = sys.argv[8]

n_classes = len(np.unique(label_train))
model = TwoBranchModel(128, "model", n_classes, dropout_rate=0.2)
print("model created")
""" defining loss function and the optimizer to use in the training phase """
#loss_object = tf.keras.losses.Huber()
loss_function = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001)
BATCH_SIZE = 256
n_epochs = 1000
best_valid_fMeasure = 0
for e in range(n_epochs):
    ts_train_S2_pixel, ts_train_S2_obj, label_train = shuffle(ts_train_S2_pixel, ts_train_S2_obj, label_train)
    start = time.time()
    trainLoss = trainClassifS2(model, ts_train_S2_pixel, ts_train_S2_obj, label_train, loss_function, optimizer, BATCH_SIZE)
    elapsed = time.time() - start
    pred = model.predict((ts_valid_S2_pixel, ts_valid_S2_obj))
    fscore = f1_score(label_valid, np.argmax(pred, axis=1), average="weighted")
    # Keep only the weights of the model with the best weighted F-Measure on validation.
    if fscore > best_valid_fMeasure:
        best_valid_fMeasure = fscore
        model.save_weights(output_dir_models + "/model_" + split_id)
    print("epoch %d with loss %f and F-Measure on validation %f in %f seconds" % (e, trainLoss, fscore, elapsed))
    print(f1_score(label_valid, np.argmax(pred, axis=1), average=None))
    sys.stdout.flush()
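The weights saved above can be restored for inference by rebuilding the same architecture and loading the checkpoint. A minimal sketch, assuming split 0 and the modelTwoBranch output directory used in launch.sh; the class count is a placeholder and must match the one used at training time:

import numpy as np
from BaseModels import TwoBranchModel

# Hypothetical file names, following the launch.sh naming convention for split 0.
ts_valid_S2_pixel = np.load("data_split/valid_X_pxl_0.npy")
ts_valid_S2_obj = np.load("data_split/valid_X_median_0.npy")
n_classes = 7  # assumption: must equal len(np.unique(label_train)) from training

model = TwoBranchModel(128, "model", n_classes, dropout_rate=0.2)
model.load_weights("modelTwoBranch/model_0")
pred = model.predict((ts_valid_S2_pixel, ts_valid_S2_obj))
labels = np.argmax(pred, axis=1) + 1  # shift back to the original 1-based labels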