Commit bea141b5 authored by Cresson Remi

Merge branch 'develop' into patchesselection

parents ba90833f c76fe1be
@@ -50,14 +50,14 @@ namespace otb
* The names of the TensorFlow graph target nodes that must be triggered can be
* set with SetTargetNodesNames().
*
* The OutputTensorNames consists of a std::vector of std::string, and
* corresponds to the names of the tensors that will be computed during the session.
* As for the input placeholders, the output tensors fields of expression
* (OutputExpressionFields, a std::vector of SizeType), i.e. the output
* space that the TensorFlow model will "generate", must be provided.
*
* Finally, a list of scalar placeholders can be fed in the form of a std::vector
* of std::string, each one expressing the assignment of a single-valued
* placeholder, e.g. "drop_rate=0.5 learning_rate=0.002 toto=true".
* See otb::tf::ExpressionToTensor() for more details about the syntax.
*
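On the TensorFlow side, such a single-valued placeholder is typically a scalar placeholder with a default value, as in the Python scripts further down. A minimal sketch (TensorFlow 1.x; the name "drop_rate" is only an illustration, not part of the filter's API):

import tensorflow as tf

# Scalar placeholder with a default value; at run time it can be overridden
# from the OTB side with an expression such as "drop_rate=0.5".
drop_rate = tf.placeholder_with_default(tf.constant(0.3, dtype=tf.float32, shape=[]),
                                        shape=[], name="drop_rate")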
@@ -23,7 +23,15 @@ namespace otb
/**
* \class TensorflowMultisourceModelLearningBase
* \brief This filter is the base class for all learning filters, which take patch images as input.
*
* An input patch image is an image of size (pszx, pszy*n, nbands), where:
* - pszx   : the width of one patch
* - pszy   : the height of one patch
* - n      : the number of patches stacked in the patch image
* - nbands : the number of channels in the patch image
*
* This filter verifies that all input patch images are consistent.
*
* The batch size can be set using the SetBatchSize() method.
* The streaming can be activated to allow the processing of huge datasets.
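A minimal sketch of this layout, using plain numpy only to illustrate the geometry (the array, names and sizes below are illustrative, not part of the filter's API):

import numpy as np

# A patch image stacks n patches of size (pszx, pszy) with nbands channels into a
# single image of width pszx and height pszy*n; as a row-major numpy array this is
# an array of shape (pszy*n, pszx, nbands).
pszx, pszy, n, nbands = 16, 16, 100, 4
patches_image = np.zeros((pszy * n, pszx, nbands), dtype=np.float32)

# The k-th patch occupies the rows [k*pszy, (k+1)*pszy).
k = 42
patch_k = patches_image[k * pszy:(k + 1) * pszy, :, :]  # shape (pszy, pszx, nbands)

# A batch of b consecutive patches starting at patch k.
b = 8
batch = patches_image[k * pszy:(k + b) * pszy, :, :].reshape(b, pszy, pszx, nbands)

Presumably, the consistency check mentioned above amounts to verifying that each input has a height that is a multiple of its patch size and that all inputs carry the same number of patches n.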
@@ -21,6 +21,9 @@
// TF common
#include "otbTensorflowCommon.h"
// Tree iterator
#include "itkPreOrderTreeIterator.h"
namespace otb
{
@@ -11,8 +11,8 @@
#ifndef otbTensorflowStreamerFilter_h
#define otbTensorflowStreamerFilter_h
// Image2image
#include "itkImageToImageFilter.h"
#include "itkProgressReporter.h"
namespace otb
{
......
from tricks import *
import sys
import os

nclasses=8

def myModel(x1,x2):

  # The XS branch (input patches: 8x8x4)
  conv1_x1 = tf.layers.conv2d(inputs=x1, filters=16, kernel_size=[5,5], padding="valid",
                              activation=tf.nn.relu) # out size: 4x4x16
  conv2_x1 = tf.layers.conv2d(inputs=conv1_x1, filters=32, kernel_size=[3,3], padding="valid",
                              activation=tf.nn.relu) # out size: 2x2x32
  conv3_x1 = tf.layers.conv2d(inputs=conv2_x1, filters=64, kernel_size=[2,2], padding="valid",
                              activation=tf.nn.relu) # out size: 1x1x64

  # The PAN branch (input patches: 32x32x1)
  conv1_x2 = tf.layers.conv2d(inputs=x2, filters=16, kernel_size=[5,5], padding="valid",
                              activation=tf.nn.relu) # out size: 28x28x16
  pool1_x2 = tf.layers.max_pooling2d(inputs=conv1_x2, pool_size=[2, 2],
                                     strides=2) # out size: 14x14x16
  conv2_x2 = tf.layers.conv2d(inputs=pool1_x2, filters=32, kernel_size=[5,5], padding="valid",
                              activation=tf.nn.relu) # out size: 10x10x32
  pool2_x2 = tf.layers.max_pooling2d(inputs=conv2_x2, pool_size=[2, 2],
                                     strides=2) # out size: 5x5x32
  conv3_x2 = tf.layers.conv2d(inputs=pool2_x2, filters=64, kernel_size=[3,3], padding="valid",
                              activation=tf.nn.relu) # out size: 3x3x64
  conv4_x2 = tf.layers.conv2d(inputs=conv3_x2, filters=64, kernel_size=[3,3], padding="valid",
                              activation=tf.nn.relu) # out size: 1x1x64

  # Stack features
  features = tf.reshape(tf.stack([conv3_x1, conv4_x2], axis=3),
                        shape=[-1, 128], name="features")

  # 8 neurons for 8 classes
  estimated = tf.layers.dense(inputs=features, units=nclasses, activation=None)
  estimated_label = tf.argmax(estimated, 1, name="prediction")

  return estimated, estimated_label

""" Main """
# check number of arguments
if len(sys.argv) != 2:
  print("Usage : <output directory for SavedModel>")
  sys.exit(1)

# Create the graph
with tf.Graph().as_default():

  # Placeholders
  x1 = tf.placeholder(tf.float32, [None, None, None, 4], name="x1")
  x2 = tf.placeholder(tf.float32, [None, None, None, 1], name="x2")
  y  = tf.placeholder(tf.int32,   [None, None, None, 1], name="y")
  lr = tf.placeholder_with_default(tf.constant(0.0002, dtype=tf.float32, shape=[]),
                                   shape=[], name="lr")

  # Output
  y_estimated, y_label = myModel(x1,x2)

  # Loss function
  cost = tf.losses.sparse_softmax_cross_entropy(labels=tf.reshape(y, [-1, 1]),
                                                logits=tf.reshape(y_estimated, [-1, nclasses]))

  # Optimizer
  optimizer = tf.train.AdamOptimizer(learning_rate=lr, name="optimizer").minimize(cost)

  # Initializer, saver, session
  init = tf.global_variables_initializer()
  saver = tf.train.Saver(max_to_keep=20)
  sess = tf.Session()
  sess.run(init)

  # Create a SavedModel
  CreateSavedModel(sess, ["x1:0", "x2:0", "y:0"], ["features:0", "prediction:0"], sys.argv[1])

from tricks import *
import sys
import os

nclasses=8

def myModel(x):

  # input patches: 16x16x4
  conv1 = tf.layers.conv2d(inputs=x, filters=16, kernel_size=[5,5], padding="valid",
                           activation=tf.nn.relu) # out size: 12x12x16
  pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2) # out: 6x6x16
  conv2 = tf.layers.conv2d(inputs=pool1, filters=16, kernel_size=[3,3], padding="valid",
                           activation=tf.nn.relu) # out size: 4x4x16
  pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2) # out: 2x2x16
  conv3 = tf.layers.conv2d(inputs=pool2, filters=32, kernel_size=[2,2], padding="valid",
                           activation=tf.nn.relu) # out size: 1x1x32

  # Features
  features = tf.reshape(conv3, shape=[-1, 32], name="features")

  # 8 neurons for 8 classes
  estimated = tf.layers.dense(inputs=features, units=nclasses, activation=None)
  estimated_label = tf.argmax(estimated, 1, name="prediction")

  return estimated, estimated_label

""" Main """
if len(sys.argv) != 2:
  print("Usage : <output directory for SavedModel>")
  sys.exit(1)

# Create the TensorFlow graph
with tf.Graph().as_default():

  # Placeholders
  x = tf.placeholder(tf.float32, [None, None, None, 4], name="x")
  y = tf.placeholder(tf.int32,   [None, None, None, 1], name="y")
  lr = tf.placeholder_with_default(tf.constant(0.0002, dtype=tf.float32, shape=[]),
                                   shape=[], name="lr")

  # Output
  y_estimated, y_label = myModel(x)

  # Loss function
  cost = tf.losses.sparse_softmax_cross_entropy(labels=tf.reshape(y, [-1, 1]),
                                                logits=tf.reshape(y_estimated, [-1, nclasses]))

  # Optimizer
  optimizer = tf.train.AdamOptimizer(learning_rate=lr, name="optimizer").minimize(cost)

  # Initializer, saver, session
  init = tf.global_variables_initializer()
  saver = tf.train.Saver(max_to_keep=20)
  sess = tf.Session()
  sess.run(init)

  # Create a SavedModel
  CreateSavedModel(sess, ["x:0", "y:0"], ["features:0", "prediction:0"], sys.argv[1])

from tricks import *
import sys
import os

nclasses=8

def myModel(x):

  # input patches: 16x16x4
  conv1 = tf.layers.conv2d(inputs=x, filters=16, kernel_size=[5,5], padding="valid",
                           activation=tf.nn.relu) # out size: 12x12x16
  conv2 = tf.layers.conv2d(inputs=conv1, filters=16, kernel_size=[5,5], padding="valid",
                           activation=tf.nn.relu) # out size: 8x8x16
  conv3 = tf.layers.conv2d(inputs=conv2, filters=32, kernel_size=[5,5], padding="valid",
                           activation=tf.nn.relu) # out size: 4x4x32
  conv4 = tf.layers.conv2d(inputs=conv3, filters=32, kernel_size=[4,4], padding="valid",
                           activation=tf.nn.relu) # out size: 1x1x32

  # Features
  features = tf.reshape(conv4, shape=[-1, 32], name="features")

  # 8 neurons for 8 classes
  estimated = tf.layers.dense(inputs=features, units=nclasses, activation=None)
  estimated_label = tf.argmax(estimated, 1, name="prediction")

  return estimated, estimated_label

""" Main """
if len(sys.argv) != 2:
  print("Usage : <output directory for SavedModel>")
  sys.exit(1)

# Create the TensorFlow graph
with tf.Graph().as_default():

  # Placeholders
  x = tf.placeholder(tf.float32, [None, None, None, 4], name="x")
  y = tf.placeholder(tf.int32,   [None, None, None, 1], name="y")
  lr = tf.placeholder_with_default(tf.constant(0.0002, dtype=tf.float32, shape=[]),
                                   shape=[], name="lr")

  # Output
  y_estimated, y_label = myModel(x)

  # Loss function
  cost = tf.losses.sparse_softmax_cross_entropy(labels=tf.reshape(y, [-1, 1]),
                                                logits=tf.reshape(y_estimated, [-1, nclasses]))

  # Optimizer
  optimizer = tf.train.AdamOptimizer(learning_rate=lr, name="optimizer").minimize(cost)

  # Initializer, saver, session
  init = tf.global_variables_initializer()
  saver = tf.train.Saver(max_to_keep=20)
  sess = tf.Session()
  sess.run(init)

  # Create a SavedModel
  CreateSavedModel(sess, ["x:0", "y:0"], ["features:0", "prediction:0"], sys.argv[1])
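
As a quick sanity check, the exported directory can be reloaded with the stock TensorFlow 1.x loader before being passed to TensorflowModelServe. This is only a sketch: it assumes that CreateSavedModel (from tricks) exports the model under the standard SERVING tag, which is not shown here.

import tensorflow as tf

export_dir = "/path/to/savedmodel"  # the directory passed as sys.argv[1] above
with tf.Session(graph=tf.Graph()) as sess:
  # Assumption: the model was saved with the standard SERVING tag.
  tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], export_dir)
  # The tensor names referenced by the tests below must exist in the loaded graph.
  sess.graph.get_tensor_by_name("x:0")
  sess.graph.get_tensor_by_name("prediction:0")
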
otb_module_test()
# Directories
set(DATADIR ${CMAKE_CURRENT_SOURCE_DIR}/data)
set(MODELSDIR ${CMAKE_CURRENT_SOURCE_DIR}/models)
# Input images
set(IMAGEXS ${DATADIR}/xs_subset.tif)
set(IMAGEPAN ${DATADIR}/pan_subset.tif)
set(IMAGEPXS ${DATADIR}/pxs_subset.tif)
# Input models
set(MODEL1 ${MODELSDIR}/model1)
set(MODEL2 ${MODELSDIR}/model2)
set(MODEL3 ${MODELSDIR}/model3)
# Output images and baselines
set(MODEL1_PB_OUT apTvClTensorflowModelServeCNN16x16PB.tif)
set(MODEL2_PB_OUT apTvClTensorflowModelServeCNN8x8_32x32PB.tif)
set(MODEL2_FC_OUT apTvClTensorflowModelServeCNN8x8_32x32FC.tif)
set(MODEL3_PB_OUT apTvClTensorflowModelServeFCNN16x16PB.tif)
set(MODEL3_FC_OUT apTvClTensorflowModelServeFCNN16x16FC.tif)
#----------- Model serving : 1-branch CNN (16x16) Patch-Based ----------------
otb_test_application(NAME TensorflowModelServeCNN16x16PB
APP TensorflowModelServe
OPTIONS -source1.il ${IMAGEPXS}
-source1.rfieldx 16 -source1.rfieldy 16 -source1.placeholder x
-model.dir ${MODEL1} -output.names prediction
-out ${TEMP}/${MODEL1_PB_OUT}
VALID --compare-image ${EPSILON_6}
${DATADIR}/${MODEL1_PB_OUT}
${TEMP}/${MODEL1_PB_OUT})
#----------- Model serving : 2-branch CNN (8x8, 32x32) Patch-Based ----------------
otb_test_application(NAME apTvClTensorflowModelServeCNN8x8_32x32PB
APP TensorflowModelServe
OPTIONS -source1.il ${IMAGEXS}
-source1.rfieldx 8 -source1.rfieldy 8 -source1.placeholder x1
-source2.il ${IMAGEPAN}
-source2.rfieldx 32 -source2.rfieldy 32 -source2.placeholder x2
-model.dir ${MODEL2} -output.names prediction
-out ${TEMP}/${MODEL2_PB_OUT}
VALID --compare-image ${EPSILON_6}
${DATADIR}/${MODEL2_PB_OUT}
${TEMP}/${MODEL2_PB_OUT})
set_tests_properties(apTvClTensorflowModelServeCNN8x8_32x32PB PROPERTIES ENVIRONMENT "OTB_TF_NSOURCES=2;$ENV{OTB_TF_NSOURCES}")
#----------- Model serving : 2-branch CNN (8x8, 32x32) Fully-Conv ----------------
set(ENV{OTB_TF_NSOURCES} 2)
otb_test_application(NAME apTvClTensorflowModelServeCNN8x8_32x32FC
APP TensorflowModelServe
OPTIONS -source1.il ${IMAGEXS}
-source1.rfieldx 8 -source1.rfieldy 8 -source1.placeholder x1
-source2.il ${IMAGEPAN}
-source2.rfieldx 32 -source2.rfieldy 32 -source2.placeholder x2
-model.dir ${MODEL2} -output.names prediction -output.spcscale 4
-out ${TEMP}/${MODEL2_FC_OUT}
VALID --compare-image ${EPSILON_6}
${DATADIR}/${MODEL2_FC_OUT}
${TEMP}/${MODEL2_FC_OUT})
set_tests_properties(apTvClTensorflowModelServeCNN8x8_32x32FC PROPERTIES ENVIRONMENT "OTB_TF_NSOURCES=2;$ENV{OTB_TF_NSOURCES}")
#----------- Model serving : 1-branch FCNN (16x16) Patch-Based ----------------
set(ENV{OTB_TF_NSOURCES} 1)
otb_test_application(NAME apTvClTensorflowModelServeFCNN16x16PB
APP TensorflowModelServe
OPTIONS -source1.il ${IMAGEPXS}
-source1.rfieldx 16 -source1.rfieldy 16 -source1.placeholder x
-model.dir ${MODEL3} -output.names prediction
-out ${TEMP}/${MODEL3_PB_OUT}
VALID --compare-image ${EPSILON_6}
${DATADIR}/${MODEL3_PB_OUT}
${TEMP}/${MODEL3_PB_OUT})
#----------- Model serving : 1-branch FCNN (16x16) Fully-conv ----------------
set(ENV{OTB_TF_NSOURCES} 1)
otb_test_application(NAME apTvClTensorflowModelServeFCNN16x16FC
APP TensorflowModelServe
OPTIONS -source1.il ${IMAGEPXS}
-source1.rfieldx 16 -source1.rfieldy 16 -source1.placeholder x
-model.dir ${MODEL3} -output.names prediction -model.fullyconv on
-out ${TEMP}/${MODEL3_FC_OUT}
VALID --compare-image ${EPSILON_6}
${DATADIR}/${MODEL3_FC_OUT}
${TEMP}/${MODEL3_FC_OUT})