Commit 8cc0e197 authored by Cresson Remi

WIP: migrate Python code from TF1.X --> TF2.X

Showing 479 additions and 507 deletions (+479 -507)
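Every file in this commit follows the same pattern: the TF1-style graph code is kept, but runs through the TF2 compat shim. A minimal standalone sketch of that pattern (not a file from the repository):

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # restore graph mode, sessions and placeholders, as in TF1

with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, [None, 4], name="x")      # TF1 API, via compat.v1
    w = tf.Variable(tf.truncated_normal([4, 2]), name="w")
    y = tf.matmul(x, w, name="y")
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(y, feed_dict={x: [[1., 2., 3., 4.]]}).shape)  # (1, 2)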
# -*- coding: utf-8 -*-
# ==========================================================================
#
# Copyright 2018-2019 Remi Cresson (IRSTEA)
# Copyright 2020 Remi Cresson (INRAE)
@@ -16,28 +16,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
import argparse

from tricks import ckpt_to_savedmodel

# Parser
parser = argparse.ArgumentParser()
parser.add_argument("--ckpt", help="Checkpoint file (without the \".meta\" extension)", required=True)
parser.add_argument("--inputs", help="Inputs names (e.g. [\"x_cnn_1:0\", \"x_cnn_2:0\"])", required=True, nargs='+')
parser.add_argument("--outputs", help="Outputs names (e.g. [\"prediction:0\", \"features:0\"])", required=True,
                    nargs='+')
parser.add_argument("--model", help="Output directory for SavedModel", required=True)
parser.add_argument('--clear_devices', dest='clear_devices', action='store_true')
parser.set_defaults(clear_devices=False)
params = parser.parse_args()

if __name__ == "__main__":
    ckpt_to_savedmodel(ckpt_path=params.ckpt,
                       inputs=params.inputs,
                       outputs=params.outputs,
                       savedmodel_path=params.model,
                       clear_devices=params.clear_devices)
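The renamed helper can also be called directly from Python; a hedged usage sketch with placeholder paths and tensor names (none of these values come from the commit):

from tricks import ckpt_to_savedmodel

ckpt_to_savedmodel(ckpt_path="checkpoints/model.ckpt",  # without the ".meta" extension
                   inputs=["x:0", "y:0"],
                   outputs=["prediction:0"],
                   savedmodel_path="export/savedmodel",
                   clear_devices=True)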
# -*- coding: utf-8 -*-
# ==========================================================================
#
# Copyright Remi Cresson, Dino Ienco (IRSTEA)
#
@@ -15,208 +15,196 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
# Reference:
#
# Benedetti, P., Ienco, D., Gaetano, R., Ose, K., Pensa, R. G., & Dupuy, S. (2018)
# M3Fusion: A Deep Learning Architecture for Multiscale Multimodal Multitemporal
# Satellite Data Fusion. IEEE Journal of Selected Topics in Applied Earth
# Observations and Remote Sensing, 11(12), 4939-4949.
import argparse

from tricks import create_savedmodel
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

parser = argparse.ArgumentParser()
parser.add_argument("--nunits", type=int, default=1024, help="number of units")
parser.add_argument("--n_levels_lstm", type=int, default=1, help="number of lstm levels")
parser.add_argument("--hm_epochs", type=int, default=400, help="hm epochs")
parser.add_argument("--n_timestamps", type=int, default=37, help="number of images in timeseries")
parser.add_argument("--n_dims", type=int, default=16, help="number of channels in timeseries images")
parser.add_argument("--patch_window", type=int, default=25, help="patch size for the high-res image")
parser.add_argument("--n_channels", type=int, default=4, help="number of channels in the high-res image")
parser.add_argument("--nclasses", type=int, default=8, help="number of classes")
parser.add_argument("--outdir", help="Output directory for SavedModel", required=True)
params = parser.parse_args()


def RnnAttention(x, nunits, nlayer, n_dims, n_timestamps, is_training_ph):
    N = tf.shape(x)[0]  # size of batch
    x = tf.reshape(x, [N, n_dims, n_timestamps])
    # (before unstack) x is one tensor of shape [N, n_dims, n_timestamps]
    x = tf.unstack(x, n_timestamps, axis=2)
    # (after unstack) x is a list of "n_timestamps" tensors of shape [N, n_dims]
    # NETWORK DEF
    # MORE THAN ONE LAYER: stack of GRU cells, nunits hidden units each
    # (tf.contrib was removed in TF2, so the cells come from compat.v1)
    if nlayer > 1:
        cells = []
        for _ in range(nlayer):
            cells.append(tf.compat.v1.nn.rnn_cell.GRUCell(nunits))
        cell = tf.compat.v1.nn.rnn_cell.MultiRNNCell(cells)
    # SINGLE LAYER: single GRUCell, nunits hidden units
    else:
        cell = tf.compat.v1.nn.rnn_cell.GRUCell(nunits)
    outputs, _ = tf.compat.v1.nn.static_rnn(cell, x, dtype="float32")
    # At this point, outputs is a list of "n_timestamps" tensors of shape [N, nunits]
    outputs = tf.stack(outputs, axis=1)
    # At this point, outputs is a tensor of shape [N, n_timestamps, nunits]
    # Trainable parameters
    attention_size = nunits  # int(nunits / 2)
    W_omega = tf.Variable(tf.random_normal([nunits, attention_size], stddev=0.1))
    b_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
    u_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
    # Applying fully connected layer with non-linear activation to each of the B*T timestamps;
    # the shape of `v` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
    v = tf.tanh(tf.tensordot(outputs, W_omega, axes=1) + b_omega)
    # For each of the timestamps, its vector of size A from `v` is reduced with the `u` vector
    vu = tf.tensordot(v, u_omega, axes=1)  # (B,T) shape
    alphas = tf.nn.softmax(vu)  # (B,T) shape also
    output = tf.reduce_sum(outputs * tf.expand_dims(alphas, -1), 1)
    output = tf.reshape(output, [-1, nunits])
    return output


def CNN(x, nunits):
    # nunits = 512
    conv1 = tf.compat.v1.layers.conv2d(
        inputs=x,
        filters=nunits // 2,  # 256 (integer division, conv2d expects an int)
        kernel_size=[7, 7],
        padding="valid",
        activation=tf.nn.relu)
    conv1 = tf.compat.v1.layers.batch_normalization(conv1)
    pool1 = tf.compat.v1.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)

    conv2 = tf.compat.v1.layers.conv2d(
        inputs=pool1,
        filters=nunits,
        kernel_size=[3, 3],
        padding="valid",
        activation=tf.nn.relu)
    conv2 = tf.compat.v1.layers.batch_normalization(conv2)

    conv3 = tf.compat.v1.layers.conv2d(
        inputs=conv2,
        filters=nunits,
        kernel_size=[3, 3],
        padding="same",
        activation=tf.nn.relu)
    conv3 = tf.compat.v1.layers.batch_normalization(conv3)
    conv3 = tf.compat.v1.concat([conv2, conv3], 3)

    conv4 = tf.compat.v1.layers.conv2d(
        inputs=conv3,
        filters=nunits,
        kernel_size=[1, 1],
        padding="valid",
        activation=tf.nn.relu)
    conv4 = tf.compat.v1.layers.batch_normalization(conv4)

    cnn = tf.reduce_mean(conv4, [1, 2])
    tensor_shape = cnn.get_shape()
    return cnn, tensor_shape[1].value


def get_prediction(x_rnn, x_cnn, nunits, nlayer, nclasses, n_dims, n_timestamps):
    # is_training_ph is the placeholder defined in the main graph below
    vec_rnn = RnnAttention(x_rnn, nunits, nlayer, n_dims, n_timestamps, is_training_ph)
    vec_cnn, cnn_dim = CNN(x_cnn, 512)

    features_learnt = tf.concat([vec_rnn, vec_cnn], axis=1, name="features")
    first_dim = cnn_dim + nunits

    # Classifier 1: RNN branch
    outb1 = tf.Variable(tf.truncated_normal([nclasses]), name='B1')
    outw1 = tf.Variable(tf.truncated_normal([nunits, nclasses]), name='W1')
    pred_c1 = tf.matmul(vec_rnn, outw1) + outb1
    # Classifier 2: CNN branch
    outb2 = tf.Variable(tf.truncated_normal([nclasses]), name='B2')
    outw2 = tf.Variable(tf.truncated_normal([cnn_dim, nclasses]), name='W2')
    pred_c2 = tf.matmul(vec_cnn, outw2) + outb2
    # Full classifier, on the concatenated features
    outb = tf.Variable(tf.truncated_normal([nclasses]), name='B')
    outw = tf.Variable(tf.truncated_normal([first_dim, nclasses]), name='W')
    pred_full = tf.matmul(features_learnt, outw) + outb
    return pred_c1, pred_c2, pred_full, features_learnt


# Create the TensorFlow graph
with tf.compat.v1.Graph().as_default():
    x_rnn = tf.compat.v1.placeholder(tf.float32, [None, 1, 1, params.n_dims * params.n_timestamps], name="x_rnn")
    x_cnn = tf.compat.v1.placeholder(tf.float32, [None, params.patch_window, params.patch_window, params.n_channels],
                                     name="x_cnn")
    y = tf.compat.v1.placeholder(tf.int32, [None, 1, 1, 1], name="y")
    learning_rate = tf.compat.v1.placeholder_with_default(tf.constant(0.0002, dtype=tf.float32, shape=[]), shape=[],
                                                          name="learning_rate")
    is_training_ph = tf.compat.v1.placeholder_with_default(tf.constant(False, dtype=tf.bool, shape=[]), shape=[],
                                                           name="is_training")
    dropout = tf.compat.v1.placeholder_with_default(tf.constant(0.5, dtype=tf.float32, shape=[]), shape=[],
                                                    name="drop_rate")

    pred_c1, pred_c2, pred_full, features_learnt = get_prediction(x_rnn,
                                                                  x_cnn,
                                                                  params.nunits,
                                                                  params.n_levels_lstm,
                                                                  params.nclasses,
                                                                  params.n_dims,
                                                                  params.n_timestamps)

    testPrediction = tf.argmax(pred_full, 1, name="prediction")

    loss_full = tf.compat.v1.losses.sparse_softmax_cross_entropy(labels=tf.reshape(y, [-1, 1]),
                                                                 logits=tf.reshape(pred_full, [-1, params.nclasses]))
    loss_c1 = tf.compat.v1.losses.sparse_softmax_cross_entropy(labels=tf.reshape(y, [-1, 1]),
                                                               logits=tf.reshape(pred_c1, [-1, params.nclasses]))
    loss_c2 = tf.compat.v1.losses.sparse_softmax_cross_entropy(labels=tf.reshape(y, [-1, 1]),
                                                               logits=tf.reshape(pred_c2, [-1, params.nclasses]))

    cost = loss_full + (0.3 * loss_c1) + (0.3 * loss_c2)

    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate, name="optimizer").minimize(cost)

    correct = tf.equal(tf.argmax(pred_full, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float64))

    # Initializer, saver, session
    init = tf.compat.v1.global_variables_initializer()
    saver = tf.compat.v1.train.Saver(max_to_keep=20)
    sess = tf.compat.v1.Session()
    sess.run(init)

    create_savedmodel(sess, ["x_cnn:0", "x_rnn:0", "y:0"], ["prediction:0"], params.outdir)
# -*- coding: utf-8 -*-
# ==========================================================================
#
# Copyright 2018-2019 Remi Cresson (IRSTEA)
# Copyright 2020 Remi Cresson (INRAE)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,12 +20,14 @@
# Reference:
#
# Maggiori, E., Tarabalka, Y., Charpiat, G., & Alliez, P. (2016).
# "Convolutional neural networks for large-scale remote-sensing image classification."
# IEEE Transactions on Geoscience and Remote Sensing, 55(2), 645-657.
import argparse

from tricks import create_savedmodel
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

parser = argparse.ArgumentParser()
parser.add_argument("--outdir", help="Output directory for SavedModel", required=True)
@@ -32,93 +35,69 @@
parser.add_argument("--n_channels", type=int, default=4, help="number of channels")
params = parser.parse_args()

# Build the graph
with tf.compat.v1.Graph().as_default():
    # Size of patches
    patch_size_xs = 80
    patch_size_label = 16

    # placeholders for images and labels
    lr = tf.compat.v1.placeholder_with_default(tf.constant(0.0002, dtype=tf.float32, shape=[]), shape=[],
                                               name="learning_rate")
    # kept so the "is_training:0" input exported below still resolves
    is_training = tf.compat.v1.placeholder_with_default(tf.constant(False, dtype=tf.bool, shape=[]), shape=[],
                                                        name="is_training")
    x = tf.compat.v1.placeholder(tf.float32, shape=(None, patch_size_xs, patch_size_xs, params.n_channels), name="x")
    y = tf.compat.v1.placeholder(tf.int32, shape=(None, patch_size_label, patch_size_label, 1), name="y")

    # Convolutional Layer #1
    conv1 = tf.compat.v1.layers.conv2d(inputs=x, filters=64, kernel_size=[12, 12], padding="valid",
                                       activation=tf.nn.crelu)

    # Normalization of output of layer 1
    norm1 = tf.compat.v1.layers.batch_normalization(conv1)

    # pooling layer #1
    pool1 = tf.compat.v1.layers.max_pooling2d(inputs=norm1, pool_size=[4, 4], strides=4)

    # Convolutional Layer #2
    conv2 = tf.compat.v1.layers.conv2d(inputs=pool1, filters=112, kernel_size=[4, 4], padding="valid",
                                       activation=tf.nn.crelu)

    # Normalization of output of layer 2
    norm2 = tf.compat.v1.layers.batch_normalization(conv2)

    # Convolutional Layer #3
    conv3 = tf.compat.v1.layers.conv2d(inputs=norm2, filters=80, kernel_size=[3, 3], padding="valid",
                                       activation=tf.nn.crelu)

    # Normalization of output of layer 3
    norm3 = tf.compat.v1.layers.batch_normalization(conv3)

    # Convolutional Layer #4
    conv4 = tf.compat.v1.layers.conv2d(inputs=norm3, filters=1, kernel_size=[8, 8], padding="valid",
                                       activation=tf.nn.crelu)

    # Deconv = transposed conv on the strided input: out = (5-1)*4+8 = (5+1)*4
    deconv1 = tf.compat.v1.layers.conv2d_transpose(inputs=conv4, filters=1, strides=(4, 4), kernel_size=[8, 8],
                                                   padding="valid", activation=tf.nn.sigmoid)

    n = tf.shape(deconv1)[0]
    szx = tf.shape(deconv1)[1]
    szy = tf.shape(deconv1)[2]
    estimated = tf.slice(deconv1, [0, 4, 4, 0], [n, szx - 8, szy - 8, 1], "estimated")

    # Loss
    estimated_resized = tf.reshape(estimated, [-1, patch_size_label * patch_size_label])
    labels_resized = tf.reshape(y, [-1, patch_size_label * patch_size_label])
    labels_resized = tf.cast(labels_resized, tf.float32)
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_resized, logits=estimated_resized))

    # Optimizer
    train_op = tf.compat.v1.train.AdamOptimizer(lr, name="optimizer").minimize(loss)

    # Initializer, saver, session
    init = tf.compat.v1.global_variables_initializer()
    saver = tf.compat.v1.train.Saver(max_to_keep=20)
    sess = tf.compat.v1.Session()
    sess.run(init)

    # Let's export a SavedModel
    create_savedmodel(sess, ["x:0", "y:0", "is_training:0"], ["estimated:0"], params.outdir)
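A hedged walk-through (not part of the commit) of the spatial sizes, confirming that an 80x80 input yields the 16x16 "estimated" patch and unpacking the "(5+1)*4" note on the transposed convolution:

# Valid conv: out = in - kernel + 1; pooling: out = (in - pool) // stride + 1;
# transposed conv: out = (in - 1) * stride + kernel.
x = 80
x = x - 12 + 1         # conv1, 12x12 valid           -> 69
x = (x - 4) // 4 + 1   # pool1, 4x4, stride 4         -> 17
x = x - 4 + 1          # conv2, 4x4 valid             -> 14
x = x - 3 + 1          # conv3, 3x3 valid             -> 12
x = x - 8 + 1          # conv4, 8x8 valid             -> 5
x = (x - 1) * 4 + 8    # deconv1: (5-1)*4+8 = (5+1)*4 -> 24
x = x - 8              # slice 4 px off each border   -> 16 == patch_size_label
print(x)               # 16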
# -*- coding: utf-8 -*-
# ==========================================================================
#
# Copyright 2018-2019 Remi Cresson (IRSTEA)
# Copyright 2020 Remi Cresson (INRAE)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,74 +16,77 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
import argparse

from tricks import create_savedmodel
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

parser = argparse.ArgumentParser()
parser.add_argument("--nclasses", type=int, default=8, help="number of classes")
parser.add_argument("--outdir", help="Output directory for SavedModel", required=True)
params = parser.parse_args()


def my_model(x1, x2):
    # The XS branch (input patches: 8x8x4)
    conv1_x1 = tf.compat.v1.layers.conv2d(inputs=x1, filters=16, kernel_size=[5, 5], padding="valid",
                                          activation=tf.nn.relu)  # out size: 4x4x16
    conv2_x1 = tf.compat.v1.layers.conv2d(inputs=conv1_x1, filters=32, kernel_size=[3, 3], padding="valid",
                                          activation=tf.nn.relu)  # out size: 2x2x32
    conv3_x1 = tf.compat.v1.layers.conv2d(inputs=conv2_x1, filters=64, kernel_size=[2, 2], padding="valid",
                                          activation=tf.nn.relu)  # out size: 1x1x64

    # The PAN branch (input patches: 32x32x1)
    conv1_x2 = tf.compat.v1.layers.conv2d(inputs=x2, filters=16, kernel_size=[5, 5], padding="valid",
                                          activation=tf.nn.relu)  # out size: 28x28x16
    pool1_x2 = tf.compat.v1.layers.max_pooling2d(inputs=conv1_x2, pool_size=[2, 2],
                                                 strides=2)  # out size: 14x14x16
    conv2_x2 = tf.compat.v1.layers.conv2d(inputs=pool1_x2, filters=32, kernel_size=[5, 5], padding="valid",
                                          activation=tf.nn.relu)  # out size: 10x10x32
    pool2_x2 = tf.compat.v1.layers.max_pooling2d(inputs=conv2_x2, pool_size=[2, 2],
                                                 strides=2)  # out size: 5x5x32
    conv3_x2 = tf.compat.v1.layers.conv2d(inputs=pool2_x2, filters=64, kernel_size=[3, 3], padding="valid",
                                          activation=tf.nn.relu)  # out size: 3x3x64
    conv4_x2 = tf.compat.v1.layers.conv2d(inputs=conv3_x2, filters=64, kernel_size=[3, 3], padding="valid",
                                          activation=tf.nn.relu)  # out size: 1x1x64

    # Stack features
    features = tf.reshape(tf.stack([conv3_x1, conv4_x2], axis=3),
                          shape=[-1, 128], name="features")

    # 8 neurons for 8 classes
    estimated = tf.compat.v1.layers.dense(inputs=features, units=params.nclasses, activation=None)
    estimated_label = tf.argmax(estimated, 1, name="prediction")

    return estimated, estimated_label


# Create the graph
with tf.compat.v1.Graph().as_default():
    # Placeholders
    x1 = tf.compat.v1.placeholder(tf.float32, [None, None, None, 4], name="x1")
    x2 = tf.compat.v1.placeholder(tf.float32, [None, None, None, 1], name="x2")
    y = tf.compat.v1.placeholder(tf.int32, [None, None, None, 1], name="y")
    lr = tf.compat.v1.placeholder_with_default(tf.constant(0.0002, dtype=tf.float32, shape=[]),
                                               shape=[], name="lr")

    # Output
    y_estimated, y_label = my_model(x1, x2)

    # Loss function
    cost = tf.compat.v1.losses.sparse_softmax_cross_entropy(labels=tf.reshape(y, [-1, 1]),
                                                            logits=tf.reshape(y_estimated, [-1, params.nclasses]))

    # Optimizer
    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=lr, name="optimizer").minimize(cost)

    # Initializer, saver, session
    init = tf.compat.v1.global_variables_initializer()
    saver = tf.compat.v1.train.Saver(max_to_keep=20)
    sess = tf.compat.v1.Session()
    sess.run(init)

    # Create a SavedModel
    create_savedmodel(sess, ["x1:0", "x2:0", "y:0"], ["features:0", "prediction:0"], params.outdir)
# -*- coding: utf-8 -*-
# ==========================================================================
#
# Copyright 2018-2019 Remi Cresson (IRSTEA)
# Copyright 2020 Remi Cresson (INRAE)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,60 +16,62 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
import argparse

from tricks import create_savedmodel
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

parser = argparse.ArgumentParser()
parser.add_argument("--nclasses", type=int, default=8, help="number of classes")
parser.add_argument("--outdir", help="Output directory for SavedModel", required=True)
params = parser.parse_args()


def my_model(x):
    # input patches: 16x16x4
    conv1 = tf.compat.v1.layers.conv2d(inputs=x, filters=16, kernel_size=[5, 5], padding="valid",
                                       activation=tf.nn.relu)  # out size: 12x12x16
    pool1 = tf.compat.v1.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)  # out: 6x6x16
    conv2 = tf.compat.v1.layers.conv2d(inputs=pool1, filters=16, kernel_size=[3, 3], padding="valid",
                                       activation=tf.nn.relu)  # out size: 4x4x16
    pool2 = tf.compat.v1.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)  # out: 2x2x16
    conv3 = tf.compat.v1.layers.conv2d(inputs=pool2, filters=32, kernel_size=[2, 2], padding="valid",
                                       activation=tf.nn.relu)  # out size: 1x1x32

    # Features
    features = tf.reshape(conv3, shape=[-1, 32], name="features")

    # Neurons for classes
    estimated = tf.compat.v1.layers.dense(inputs=features, units=params.nclasses, activation=None)
    estimated_label = tf.argmax(estimated, 1, name="prediction")

    return estimated, estimated_label


# Create the TensorFlow graph
with tf.compat.v1.Graph().as_default():
    # Placeholders
    x = tf.compat.v1.placeholder(tf.float32, [None, None, None, 4], name="x")
    y = tf.compat.v1.placeholder(tf.int32, [None, None, None, 1], name="y")
    lr = tf.compat.v1.placeholder_with_default(tf.constant(0.0002, dtype=tf.float32, shape=[]), shape=[], name="lr")

    # Output
    y_estimated, y_label = my_model(x)

    # Loss function
    cost = tf.compat.v1.losses.sparse_softmax_cross_entropy(labels=tf.reshape(y, [-1, 1]),
                                                            logits=tf.reshape(y_estimated, [-1, params.nclasses]))

    # Optimizer
    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=lr, name="optimizer").minimize(cost)

    # Initializer, saver, session
    init = tf.compat.v1.global_variables_initializer()
    saver = tf.compat.v1.train.Saver(max_to_keep=20)
    sess = tf.compat.v1.Session()
    sess.run(init)

    # Create a SavedModel
    create_savedmodel(sess, ["x:0", "y:0"], ["features:0", "prediction:0"], params.outdir)
# -*- coding: utf-8 -*-
# ==========================================================================
#
# Copyright 2018-2019 Remi Cresson (IRSTEA)
# Copyright 2020 Remi Cresson (INRAE)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,60 +16,62 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
import argparse

from tricks import create_savedmodel
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

parser = argparse.ArgumentParser()
parser.add_argument("--nclasses", type=int, default=8, help="number of classes")
parser.add_argument("--outdir", help="Output directory for SavedModel", required=True)
params = parser.parse_args()


def my_model(x):
    # input patches: 16x16x4
    conv1 = tf.compat.v1.layers.conv2d(inputs=x, filters=16, kernel_size=[5, 5], padding="valid",
                                       activation=tf.nn.relu)  # out size: 12x12x16
    conv2 = tf.compat.v1.layers.conv2d(inputs=conv1, filters=16, kernel_size=[5, 5], padding="valid",
                                       activation=tf.nn.relu)  # out size: 8x8x16
    conv3 = tf.compat.v1.layers.conv2d(inputs=conv2, filters=32, kernel_size=[5, 5], padding="valid",
                                       activation=tf.nn.relu)  # out size: 4x4x32
    conv4 = tf.compat.v1.layers.conv2d(inputs=conv3, filters=32, kernel_size=[4, 4], padding="valid",
                                       activation=tf.nn.relu)  # out size: 1x1x32

    # Features
    features = tf.reshape(conv4, shape=[-1, 32], name="features")

    # 8 neurons for 8 classes
    estimated = tf.compat.v1.layers.dense(inputs=features, units=params.nclasses, activation=None)
    estimated_label = tf.argmax(estimated, 1, name="prediction")

    return estimated, estimated_label


# Create the TensorFlow graph
with tf.compat.v1.Graph().as_default():
    # Placeholders
    x = tf.compat.v1.placeholder(tf.float32, [None, None, None, 4], name="x")
    y = tf.compat.v1.placeholder(tf.int32, [None, None, None, 1], name="y")
    lr = tf.compat.v1.placeholder_with_default(tf.constant(0.0002, dtype=tf.float32, shape=[]), shape=[], name="lr")

    # Output
    y_estimated, y_label = my_model(x)

    # Loss function
    cost = tf.compat.v1.losses.sparse_softmax_cross_entropy(labels=tf.reshape(y, [-1, 1]),
                                                            logits=tf.reshape(y_estimated, [-1, params.nclasses]))

    # Optimizer
    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=lr, name="optimizer").minimize(cost)

    # Initializer, saver, session
    init = tf.compat.v1.global_variables_initializer()
    saver = tf.compat.v1.train.Saver(max_to_keep=20)
    sess = tf.compat.v1.Session()
    sess.run(init)

    # Create a SavedModel
    create_savedmodel(sess, ["x:0", "y:0"], ["features:0", "prediction:0"], params.outdir)
# -*- coding: utf-8 -*-
# ==========================================================================
#
# Copyright 2018-2019 Remi Cresson (IRSTEA)
# Copyright 2020 Remi Cresson (INRAE)
@@ -16,77 +16,76 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
import gdal
import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()


def read_image_as_np(filename, as_patches=False):
    """
    Read an image as numpy array.
    @param filename File name of patches image
    @param as_patches True if the image must be read as patches
    @return 4D numpy array [batch, h, w, c]
    """
    # Open a GDAL dataset
    ds = gdal.Open(filename)
    if ds is None:
        raise Exception("Unable to open file {}".format(filename))

    # Raster infos
    n_bands = ds.RasterCount
    szx = ds.RasterXSize
    szy = ds.RasterYSize

    # Raster array
    myarray = ds.ReadAsArray()

    # Re-order bands (when there is > 1 band)
    if len(myarray.shape) == 3:
        axes = (1, 2, 0)
        myarray = np.transpose(myarray, axes=axes)

    if as_patches:
        n = int(szy / szx)
        return myarray.reshape((n, szx, szx, n_bands))

    return myarray.reshape((1, szy, szx, n_bands))


def create_savedmodel(sess, inputs, outputs, directory):
    """
    Create a SavedModel
    @param sess TF session
    @param inputs List of inputs names (e.g. ["x_cnn_1:0", "x_cnn_2:0"])
    @param outputs List of outputs names (e.g. ["prediction:0", "features:0"])
    @param directory Path for the generated SavedModel
    """
    print("Create a SavedModel in " + directory)
    graph = tf.compat.v1.get_default_graph()
    inputs_names = {i: graph.get_tensor_by_name(i) for i in inputs}
    outputs_names = {o: graph.get_tensor_by_name(o) for o in outputs}
    tf.compat.v1.saved_model.simple_save(sess, directory, inputs=inputs_names, outputs=outputs_names)


def ckpt_to_savedmodel(ckpt_path, inputs, outputs, savedmodel_path, clear_devices=False):
    """
    Read a Checkpoint and build a SavedModel
    @param ckpt_path Path to the checkpoint file (without the ".meta" extension)
    @param inputs List of inputs names (e.g. ["x_cnn_1:0", "x_cnn_2:0"])
    @param outputs List of outputs names (e.g. ["prediction:0", "features:0"])
    @param savedmodel_path Path for the generated SavedModel
    @param clear_devices Clear TF devices positioning (True/False)
    """
    tf.compat.v1.reset_default_graph()
    with tf.compat.v1.Session() as sess:
        # Restore variables from disk
        model_saver = tf.compat.v1.train.import_meta_graph(ckpt_path + ".meta", clear_devices=clear_devices)
        model_saver.restore(sess, ckpt_path)

        # Create a SavedModel
        create_savedmodel(sess, inputs=inputs, outputs=outputs, directory=savedmodel_path)
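A hedged usage sketch for the renamed reader (file names are placeholders, not from the commit). A "patches image" stacks n square patches vertically, so its height is a multiple of its width; read_image_as_np splits it back into [n, w, w, bands]:

from tricks import read_image_as_np

patches = read_image_as_np("samples_patches.tif", as_patches=True)  # e.g. (n, 16, 16, 4)
image = read_image_as_np("scene.tif")                               # (1, h, w, bands)
print(patches.shape, image.shape)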