Commit 8cc0e197 authored by Cresson Remi's avatar Cresson Remi

WIP: migrate Python code from TF1.x to TF2.x

parent 30634814
# -*- coding: utf-8 -*-
#==========================================================================
# ==========================================================================
#
# Copyright 2018-2019 Remi Cresson (IRSTEA)
# Copyright 2020 Remi Cresson (INRAE)
......@@ -16,28 +16,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# ==========================================================================*/
import argparse
from tricks import CheckpointToSavedModel
from tricks import ckpt_to_savedmodel
# Parser
parser = argparse.ArgumentParser()
parser.add_argument("--ckpt", help="Checkpoint file (without the \".meta\" extension)", required=True)
parser.add_argument("--inputs", help="Inputs names (e.g. [\"x_cnn_1:0\", \"x_cnn_2:0\"])", required=True, nargs='+')
parser.add_argument("--outputs", help="Outputs names (e.g. [\"prediction:0\", \"features:0\"])", required=True, nargs='+')
parser.add_argument("--model", help="Output directory for SavedModel", required=True)
parser.add_argument("--ckpt", help="Checkpoint file (without the \".meta\" extension)", required=True)
parser.add_argument("--inputs", help="Inputs names (e.g. [\"x_cnn_1:0\", \"x_cnn_2:0\"])", required=True, nargs='+')
parser.add_argument("--outputs", help="Outputs names (e.g. [\"prediction:0\", \"features:0\"])", required=True,
nargs='+')
parser.add_argument("--model", help="Output directory for SavedModel", required=True)
parser.add_argument('--clear_devices', dest='clear_devices', action='store_true')
parser.set_defaults(clear_devices=False)
params = parser.parse_args()
if __name__ == "__main__":
CheckpointToSavedModel(ckpt_path=params.ckpt,
inputs=params.inputs,
outputs=params.outputs,
savedmodel_path=params.model,
clear_devices=params.clear_devices)
quit()
ckpt_to_savedmodel(ckpt_path=params.ckpt,
inputs=params.inputs,
outputs=params.outputs,
savedmodel_path=params.model,
clear_devices=params.clear_devices)
# -*- coding: utf-8 -*-
#==========================================================================
#
# Copyright Remi Cresson (IRSTEA)
# Copyright 2018-2019 Remi Cresson (IRSTEA)
# Copyright 2020 Remi Cresson (INRAE)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
......@@ -19,12 +20,14 @@
# Reference:
#
# Maggiori, E., Tarabalka, Y., Charpiat, G., & Alliez, P. (2016).
# Maggiori, E., Tarabalka, Y., Charpiat, G., & Alliez, P. (2016).
# "Convolutional neural networks for large-scale remote-sensing image classification."
# IEEE Transactions on Geoscience and Remote Sensing, 55(2), 645-657.
from tricks import *
import argparse
from tricks import create_savedmodel
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# Command-line arguments; params.n_channels is read below when building "x".
parser = argparse.ArgumentParser()
parser.add_argument("--outdir", help="Output directory for SavedModel", required=True)
# NOTE(review): this line had been swallowed by a diff hunk marker; it is
# reconstructed from the marker text — confirm default=4 against the original.
parser.add_argument("--n_channels", type=int, default=4, help="number of channels")
params = parser.parse_args()
# Build the graph
# NOTE(review): legacy TF1.x version kept by the migration diff; it is
# superseded by the tf.compat.v1 rewrite of the same graph further down —
# consider deleting. Indentation restored (the pasted diff had flattened it).
with tf.Graph().as_default():
    # Size of patches
    patch_size_xs = 80
    patch_size_label = 16

    # Placeholders for images, labels and hyper-parameters
    istraining_placeholder = tf.placeholder_with_default(tf.constant(False, dtype=tf.bool, shape=[]),
                                                         shape=[], name="is_training")
    learning_rate = tf.placeholder_with_default(tf.constant(0.0002, dtype=tf.float32, shape=[]),
                                                shape=[], name="learning_rate")
    xs_placeholder = tf.placeholder(tf.float32, shape=(None, patch_size_xs, patch_size_xs, params.n_channels),
                                    name="x")
    labels_placeholder = tf.placeholder(tf.int32, shape=(None, patch_size_label, patch_size_label, 1), name="y")

    # Convolutional Layer #1
    conv1 = tf.layers.conv2d(
        inputs=xs_placeholder,
        filters=64,
        kernel_size=[12, 12],
        padding="valid",
        activation=tf.nn.crelu)

    # Normalization of output of layer 1
    norm1 = tf.layers.batch_normalization(conv1)

    # Pooling layer #1
    pool1 = tf.layers.max_pooling2d(inputs=norm1, pool_size=[4, 4], strides=4)

    # Convolutional Layer #2
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=112,
        kernel_size=[4, 4],
        padding="valid",
        activation=tf.nn.crelu)

    # Normalization of output of layer 2
    norm2 = tf.layers.batch_normalization(conv2)

    # Convolutional Layer #3
    conv3 = tf.layers.conv2d(
        inputs=norm2,
        filters=80,
        kernel_size=[3, 3],
        padding="valid",
        activation=tf.nn.crelu)

    # Normalization of output of layer 3
    norm3 = tf.layers.batch_normalization(conv3)

    # Convolutional Layer #4
    conv4 = tf.layers.conv2d(
        inputs=norm3,
        filters=1,
        kernel_size=[8, 8],
        padding="valid",
        activation=tf.nn.crelu)
    print_tensor_info("conv4", conv4)

    # Deconv = conv on the padded/strided input, that is an (5+1)*4
    deconv1 = tf.layers.conv2d_transpose(
        inputs=conv4,
        filters=1,
        strides=(4, 4),
        kernel_size=[8, 8],
        padding="valid",
        activation=tf.nn.sigmoid)
    print_tensor_info("deconv1", deconv1)

    # Crop the 4-pixel border introduced by the transposed convolution
    numbatch = tf.shape(deconv1)[0]
    szx = tf.shape(deconv1)[1]
    szy = tf.shape(deconv1)[2]
    estimated = tf.slice(deconv1, [0, 4, 4, 0], [numbatch, szx - 8, szy - 8, 1], "estimated")
    print_tensor_info("estimated", estimated)

    # Loss: pixel-wise sigmoid cross-entropy over the flattened label patch
    estimated_resized = tf.reshape(estimated, [-1, patch_size_label * patch_size_label])
    labels_resized = tf.reshape(labels_placeholder, [-1, patch_size_label * patch_size_label])
    labels_resized = tf.cast(labels_resized, tf.float32)
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_resized, logits=estimated_resized))

    # Optimizer
    train_op = tf.train.AdamOptimizer(learning_rate, name="optimizer").minimize(loss)

    # Initializer, saver, session
    init = tf.global_variables_initializer()
    saver = tf.train.Saver(max_to_keep=20)
    sess = tf.Session()
    sess.run(init)

    # Let's export a SavedModel
    CreateSavedModel(sess, ["x:0", "y:0", "is_training:0"], ["estimated:0"], params.outdir)
\ No newline at end of file
# Build the graph (migrated to the tf.compat.v1 API; indentation restored)
with tf.compat.v1.Graph().as_default():
    # Size of patches
    patch_size_xs = 80
    patch_size_label = 16

    # Placeholders for hyper-parameters and data.
    # BUG FIX: the migration dropped the "is_training" placeholder, but it is
    # still listed in the SavedModel inputs below — re-create it so looking up
    # "is_training:0" at export time does not fail (defaults to False).
    is_training = tf.compat.v1.placeholder_with_default(tf.constant(False, dtype=tf.bool, shape=[]),
                                                        shape=[], name="is_training")
    lr = tf.compat.v1.placeholder_with_default(tf.constant(0.0002, dtype=tf.float32, shape=[]), shape=[],
                                               name="learning_rate")
    x = tf.compat.v1.placeholder(tf.float32, shape=(None, patch_size_xs, patch_size_xs, params.n_channels), name="x")
    y = tf.compat.v1.placeholder(tf.int32, shape=(None, patch_size_label, patch_size_label, 1), name="y")

    # Convolutional Layer #1
    conv1 = tf.compat.v1.layers.conv2d(inputs=x, filters=64, kernel_size=[12, 12], padding="valid",
                                       activation=tf.nn.crelu)
    # Normalization of output of layer 1
    norm1 = tf.compat.v1.layers.batch_normalization(conv1)
    # Pooling layer #1
    pool1 = tf.compat.v1.layers.max_pooling2d(inputs=norm1, pool_size=[4, 4], strides=4)
    # Convolutional Layer #2
    conv2 = tf.compat.v1.layers.conv2d(inputs=pool1, filters=112, kernel_size=[4, 4], padding="valid",
                                       activation=tf.nn.crelu)
    # Normalization of output of layer 2
    norm2 = tf.compat.v1.layers.batch_normalization(conv2)
    # Convolutional Layer #3
    conv3 = tf.compat.v1.layers.conv2d(inputs=norm2, filters=80, kernel_size=[3, 3], padding="valid",
                                       activation=tf.nn.crelu)
    # Normalization of output of layer 3
    norm3 = tf.compat.v1.layers.batch_normalization(conv3)
    # Convolutional Layer #4
    conv4 = tf.compat.v1.layers.conv2d(inputs=norm3, filters=1, kernel_size=[8, 8], padding="valid",
                                       activation=tf.nn.crelu)

    # Deconv = conv on the padded/strided input, that is an (5+1)*4
    deconv1 = tf.compat.v1.layers.conv2d_transpose(inputs=conv4, filters=1, strides=(4, 4), kernel_size=[8, 8],
                                                   padding="valid", activation=tf.nn.sigmoid)

    # Crop the 4-pixel border introduced by the transposed convolution
    n = tf.shape(deconv1)[0]
    szx = tf.shape(deconv1)[1]
    szy = tf.shape(deconv1)[2]
    estimated = tf.slice(deconv1, [0, 4, 4, 0], [n, szx - 8, szy - 8, 1], "estimated")

    # Loss: pixel-wise sigmoid cross-entropy over the flattened label patch
    estimated_resized = tf.reshape(estimated, [-1, patch_size_label * patch_size_label])
    labels_resized = tf.reshape(y, [-1, patch_size_label * patch_size_label])
    labels_resized = tf.cast(labels_resized, tf.float32)
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_resized, logits=estimated_resized))

    # Optimizer
    train_op = tf.compat.v1.train.AdamOptimizer(lr, name="optimizer").minimize(loss)

    # Initializer, saver, session
    init = tf.compat.v1.global_variables_initializer()
    saver = tf.compat.v1.train.Saver(max_to_keep=20)
    sess = tf.compat.v1.Session()
    sess.run(init)

    # Let's export a SavedModel
    create_savedmodel(sess, ["x:0", "y:0", "is_training:0"], ["estimated:0"], params.outdir)
# -*- coding: utf-8 -*-
#==========================================================================
# ==========================================================================
#
# Copyright Remi Cresson (IRSTEA)
# Copyright 2018-2019 Remi Cresson (IRSTEA)
# Copyright 2020 Remi Cresson (INRAE)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
......@@ -15,74 +16,77 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
from tricks import *
# ==========================================================================*/
import argparse
from tricks import create_savedmodel
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# Command-line arguments.
# NOTE(review): the diff had registered --nclasses twice; argparse raises
# ArgumentError on duplicate option strings, so only one registration is kept.
parser = argparse.ArgumentParser()
parser.add_argument("--nclasses", type=int, default=8, help="number of classes")
parser.add_argument("--outdir", help="Output directory for SavedModel", required=True)
params = parser.parse_args()
def myModel(x1,x2):
    # NOTE(review): legacy TF1.x version kept by the migration diff; superseded
    # by my_model() below — consider deleting. Indentation restored.
    # The XS branch (input patches: 8x8x4)
    conv1_x1 = tf.layers.conv2d(inputs=x1, filters=16, kernel_size=[5,5], padding="valid",
                                activation=tf.nn.relu)  # out size: 4x4x16
    conv2_x1 = tf.layers.conv2d(inputs=conv1_x1, filters=32, kernel_size=[3,3], padding="valid",
                                activation=tf.nn.relu)  # out size: 2x2x32
    conv3_x1 = tf.layers.conv2d(inputs=conv2_x1, filters=64, kernel_size=[2,2], padding="valid",
                                activation=tf.nn.relu)  # out size: 1x1x64

    # The PAN branch (input patches: 32x32x1)
    conv1_x2 = tf.layers.conv2d(inputs=x2, filters=16, kernel_size=[5,5], padding="valid",
                                activation=tf.nn.relu)  # out size: 28x28x16
    pool1_x2 = tf.layers.max_pooling2d(inputs=conv1_x2, pool_size=[2, 2],
                                       strides=2)  # out size: 14x14x16
    conv2_x2 = tf.layers.conv2d(inputs=pool1_x2, filters=32, kernel_size=[5,5], padding="valid",
                                activation=tf.nn.relu)  # out size: 10x10x32
    pool2_x2 = tf.layers.max_pooling2d(inputs=conv2_x2, pool_size=[2, 2],
                                       strides=2)  # out size: 5x5x32
    conv3_x2 = tf.layers.conv2d(inputs=pool2_x2, filters=64, kernel_size=[3,3], padding="valid",
                                activation=tf.nn.relu)  # out size: 3x3x64
    conv4_x2 = tf.layers.conv2d(inputs=conv3_x2, filters=64, kernel_size=[3,3], padding="valid",
                                activation=tf.nn.relu)  # out size: 1x1x64

    # Stack features of both branches (64 + 64 = 128)
    features = tf.reshape(tf.stack([conv3_x1, conv4_x2], axis=3),
                          shape=[-1, 128], name="features")

    # One neuron per class
    estimated = tf.layers.dense(inputs=features, units=params.nclasses, activation=None)
    estimated_label = tf.argmax(estimated, 1, name="prediction")

    return estimated, estimated_label
def my_model(x1, x2):
    """Build the two-branch patch classifier (migrated to tf.compat.v1).

    Args:
        x1: placeholder for the XS branch patches (the layer stack assumes
            8x8x4 patches, reducing them to 1x1x64 — confirm against sampler).
        x2: placeholder for the PAN branch patches (assumes 32x32x1 patches).

    Returns:
        (estimated, estimated_label): raw class scores from the dense layer,
        and the argmax class index tensor named "prediction".
    """
    # The XS branch (input patches: 8x8x4)
    conv1_x1 = tf.compat.v1.layers.conv2d(inputs=x1, filters=16, kernel_size=[5, 5], padding="valid",
                                          activation=tf.nn.relu)  # out size: 4x4x16
    conv2_x1 = tf.compat.v1.layers.conv2d(inputs=conv1_x1, filters=32, kernel_size=[3, 3], padding="valid",
                                          activation=tf.nn.relu)  # out size: 2x2x32
    conv3_x1 = tf.compat.v1.layers.conv2d(inputs=conv2_x1, filters=64, kernel_size=[2, 2], padding="valid",
                                          activation=tf.nn.relu)  # out size: 1x1x64

    # The PAN branch (input patches: 32x32x1)
    conv1_x2 = tf.compat.v1.layers.conv2d(inputs=x2, filters=16, kernel_size=[5, 5], padding="valid",
                                          activation=tf.nn.relu)  # out size: 28x28x16
    pool1_x2 = tf.compat.v1.layers.max_pooling2d(inputs=conv1_x2, pool_size=[2, 2],
                                                 strides=2)  # out size: 14x14x16
    conv2_x2 = tf.compat.v1.layers.conv2d(inputs=pool1_x2, filters=32, kernel_size=[5, 5], padding="valid",
                                          activation=tf.nn.relu)  # out size: 10x10x32
    pool2_x2 = tf.compat.v1.layers.max_pooling2d(inputs=conv2_x2, pool_size=[2, 2],
                                                 strides=2)  # out size: 5x5x32
    conv3_x2 = tf.compat.v1.layers.conv2d(inputs=pool2_x2, filters=64, kernel_size=[3, 3], padding="valid",
                                          activation=tf.nn.relu)  # out size: 3x3x64
    conv4_x2 = tf.compat.v1.layers.conv2d(inputs=conv3_x2, filters=64, kernel_size=[3, 3], padding="valid",
                                          activation=tf.nn.relu)  # out size: 1x1x64

    # Stack features of both branches (64 + 64 = 128)
    features = tf.reshape(tf.stack([conv3_x1, conv4_x2], axis=3),
                          shape=[-1, 128], name="features")

    # One neuron per class
    estimated = tf.compat.v1.layers.dense(inputs=features, units=params.nclasses, activation=None)
    estimated_label = tf.argmax(estimated, 1, name="prediction")

    return estimated, estimated_label
# Create the graph
# NOTE(review): legacy TF1.x version kept by the migration diff; superseded by
# the tf.compat.v1 version below — consider deleting. Indentation restored.
with tf.Graph().as_default():
    # Placeholders
    x1 = tf.placeholder(tf.float32, [None, None, None, 4], name="x1")
    x2 = tf.placeholder(tf.float32, [None, None, None, 1], name="x2")
    y = tf.placeholder(tf.int32, [None, None, None, 1], name="y")
    lr = tf.placeholder_with_default(tf.constant(0.0002, dtype=tf.float32, shape=[]),
                                     shape=[], name="lr")

    # Output
    y_estimated, y_label = myModel(x1, x2)

    # Loss function
    cost = tf.losses.sparse_softmax_cross_entropy(labels=tf.reshape(y, [-1, 1]),
                                                  logits=tf.reshape(y_estimated, [-1, params.nclasses]))

    # Optimizer
    optimizer = tf.train.AdamOptimizer(learning_rate=lr, name="optimizer").minimize(cost)

    # Initializer, saver, session
    init = tf.global_variables_initializer()
    saver = tf.train.Saver(max_to_keep=20)
    sess = tf.Session()
    sess.run(init)
# Create the graph (migrated to the tf.compat.v1 API; indentation restored)
with tf.compat.v1.Graph().as_default():
    # Placeholders
    x1 = tf.compat.v1.placeholder(tf.float32, [None, None, None, 4], name="x1")
    x2 = tf.compat.v1.placeholder(tf.float32, [None, None, None, 1], name="x2")
    y = tf.compat.v1.placeholder(tf.int32, [None, None, None, 1], name="y")
    lr = tf.compat.v1.placeholder_with_default(tf.constant(0.0002, dtype=tf.float32, shape=[]),
                                               shape=[], name="lr")

    # Output
    y_estimated, y_label = my_model(x1, x2)

    # Loss function
    cost = tf.compat.v1.losses.sparse_softmax_cross_entropy(labels=tf.reshape(y, [-1, 1]),
                                                            logits=tf.reshape(y_estimated, [-1, params.nclasses]))

    # Optimizer
    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=lr, name="optimizer").minimize(cost)

    # Initializer, saver, session
    init = tf.compat.v1.global_variables_initializer()
    saver = tf.compat.v1.train.Saver(max_to_keep=20)
    sess = tf.compat.v1.Session()
    sess.run(init)
# Create a SavedModel usable for inference.
# NOTE(review): the pre-migration CreateSavedModel call had been left in place
# alongside the migrated one; only create_savedmodel is kept (the old helper
# was removed from tricks, so the duplicate call raised NameError).
create_savedmodel(sess, ["x1:0", "x2:0", "y:0"], ["features:0", "prediction:0"], params.outdir)
# -*- coding: utf-8 -*-
#==========================================================================
# ==========================================================================
#
# Copyright Remi Cresson (IRSTEA)
# Copyright 2018-2019 Remi Cresson (IRSTEA)
# Copyright 2020 Remi Cresson (INRAE)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
......@@ -15,60 +16,62 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
from tricks import *
# ==========================================================================*/
import argparse
from tricks import create_savedmodel
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# Command-line arguments.
# NOTE(review): the diff had registered --nclasses twice; argparse raises
# ArgumentError on duplicate option strings, so only one registration is kept.
parser = argparse.ArgumentParser()
parser.add_argument("--nclasses", type=int, default=8, help="number of classes")
parser.add_argument("--outdir", help="Output directory for SavedModel", required=True)
params = parser.parse_args()
def myModel(x):
    # NOTE(review): legacy TF1.x version kept by the migration diff; superseded
    # by my_model() below — consider deleting. Indentation restored.
    # input patches: 16x16x4
    conv1 = tf.layers.conv2d(inputs=x, filters=16, kernel_size=[5,5], padding="valid",
                             activation=tf.nn.relu)  # out size: 12x12x16
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)  # out: 6x6x16
    conv2 = tf.layers.conv2d(inputs=pool1, filters=16, kernel_size=[3,3], padding="valid",
                             activation=tf.nn.relu)  # out size: 4x4x16
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)  # out: 2x2x16
    conv3 = tf.layers.conv2d(inputs=pool2, filters=32, kernel_size=[2,2], padding="valid",
                             activation=tf.nn.relu)  # out size: 1x1x32

    # Features
    features = tf.reshape(conv3, shape=[-1, 32], name="features")

    # One neuron per class
    estimated = tf.layers.dense(inputs=features, units=params.nclasses, activation=None)
    estimated_label = tf.argmax(estimated, 1, name="prediction")

    return estimated, estimated_label
def my_model(x):
    """Build the small conv/pool patch classifier (migrated to tf.compat.v1).

    Args:
        x: placeholder for input patches (the stack assumes 16x16x4 patches,
           reducing them to 1x1x32 — confirm against the patch sampler).

    Returns:
        (estimated, estimated_label): raw class scores, and the argmax class
        index tensor named "prediction".
    """
    # input patches: 16x16x4
    conv1 = tf.compat.v1.layers.conv2d(inputs=x, filters=16, kernel_size=[5, 5], padding="valid",
                                       activation=tf.nn.relu)  # out size: 12x12x16
    pool1 = tf.compat.v1.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)  # out: 6x6x16
    conv2 = tf.compat.v1.layers.conv2d(inputs=pool1, filters=16, kernel_size=[3, 3], padding="valid",
                                       activation=tf.nn.relu)  # out size: 4x4x16
    pool2 = tf.compat.v1.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)  # out: 2x2x16
    conv3 = tf.compat.v1.layers.conv2d(inputs=pool2, filters=32, kernel_size=[2, 2], padding="valid",
                                       activation=tf.nn.relu)  # out size: 1x1x32

    # Features
    features = tf.reshape(conv3, shape=[-1, 32], name="features")

    # Neurons for classes
    estimated = tf.compat.v1.layers.dense(inputs=features, units=params.nclasses, activation=None)
    estimated_label = tf.argmax(estimated, 1, name="prediction")

    return estimated, estimated_label
# Create the TensorFlow graph
# NOTE(review): legacy TF1.x version kept by the migration diff; superseded by
# the tf.compat.v1 version below — consider deleting. Indentation restored.
with tf.Graph().as_default():
    # Placeholders
    x = tf.placeholder(tf.float32, [None, None, None, 4], name="x")
    y = tf.placeholder(tf.int32, [None, None, None, 1], name="y")
    lr = tf.placeholder_with_default(tf.constant(0.0002, dtype=tf.float32, shape=[]),
                                     shape=[], name="lr")

    # Output
    y_estimated, y_label = myModel(x)

    # Loss function
    cost = tf.losses.sparse_softmax_cross_entropy(labels=tf.reshape(y, [-1, 1]),
                                                  logits=tf.reshape(y_estimated, [-1, params.nclasses]))

    # Optimizer
    optimizer = tf.train.AdamOptimizer(learning_rate=lr, name="optimizer").minimize(cost)

    # Initializer, saver, session
    init = tf.global_variables_initializer()
    saver = tf.train.Saver(max_to_keep=20)
    sess = tf.Session()
    sess.run(init)
# Create the TensorFlow graph (tf.compat.v1 API; indentation restored)
with tf.compat.v1.Graph().as_default():
    # Placeholders
    x = tf.compat.v1.placeholder(tf.float32, [None, None, None, 4], name="x")
    y = tf.compat.v1.placeholder(tf.int32, [None, None, None, 1], name="y")
    lr = tf.compat.v1.placeholder_with_default(tf.constant(0.0002, dtype=tf.float32, shape=[]), shape=[], name="lr")

    # Output
    y_estimated, y_label = my_model(x)

    # Loss function
    cost = tf.compat.v1.losses.sparse_softmax_cross_entropy(labels=tf.reshape(y, [-1, 1]),
                                                            logits=tf.reshape(y_estimated, [-1, params.nclasses]))

    # Optimizer
    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=lr, name="optimizer").minimize(cost)

    # Initializer, saver, session
    init = tf.compat.v1.global_variables_initializer()
    saver = tf.compat.v1.train.Saver(max_to_keep=20)
    sess = tf.compat.v1.Session()
    sess.run(init)
# Create a SavedModel usable for inference.
# NOTE(review): the pre-migration CreateSavedModel call had been left in place
# alongside the migrated one; only create_savedmodel is kept (the old helper
# was removed from tricks, so the duplicate call raised NameError).
create_savedmodel(sess, ["x:0", "y:0"], ["features:0", "prediction:0"], params.outdir)
# -*- coding: utf-8 -*-
#==========================================================================
# ==========================================================================
#
# Copyright Remi Cresson (IRSTEA)
# Copyright 2018-2019 Remi Cresson (IRSTEA)
# Copyright 2020 Remi Cresson (INRAE)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
......@@ -15,60 +16,62 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
from tricks import *
# ==========================================================================*/
import argparse
from tricks import create_savedmodel
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# Command-line arguments.
# NOTE(review): the diff had registered --nclasses twice; argparse raises
# ArgumentError on duplicate option strings, so only one registration is kept.
parser = argparse.ArgumentParser()
parser.add_argument("--nclasses", type=int, default=8, help="number of classes")
parser.add_argument("--outdir", help="Output directory for SavedModel", required=True)
params = parser.parse_args()
def myModel(x):
    # NOTE(review): legacy TF1.x version kept by the migration diff; superseded
    # by my_model() below — consider deleting. Indentation restored.
    # input patches: 16x16x4
    conv1 = tf.layers.conv2d(inputs=x, filters=16, kernel_size=[5,5], padding="valid",
                             activation=tf.nn.relu)  # out size: 12x12x16
    conv2 = tf.layers.conv2d(inputs=conv1, filters=16, kernel_size=[5,5], padding="valid",
                             activation=tf.nn.relu)  # out size: 8x8x16
    conv3 = tf.layers.conv2d(inputs=conv2, filters=32, kernel_size=[5,5], padding="valid",
                             activation=tf.nn.relu)  # out size: 4x4x32
    conv4 = tf.layers.conv2d(inputs=conv3, filters=32, kernel_size=[4,4], padding="valid",
                             activation=tf.nn.relu)  # out size: 1x1x32

    # Features
    features = tf.reshape(conv4, shape=[-1, 32], name="features")

    # One neuron per class
    estimated = tf.layers.dense(inputs=features, units=params.nclasses, activation=None)
    estimated_label = tf.argmax(estimated, 1, name="prediction")

    return estimated, estimated_label
def my_model(x):
    """Build the all-convolutional patch classifier (migrated to tf.compat.v1).

    Args:
        x: placeholder for input patches (the stack assumes 16x16x4 patches,
           reducing them to 1x1x32 — confirm against the patch sampler).

    Returns:
        (estimated, estimated_label): raw class scores, and the argmax class
        index tensor named "prediction".
    """
    # input patches: 16x16x4
    conv1 = tf.compat.v1.layers.conv2d(inputs=x, filters=16, kernel_size=[5, 5], padding="valid",
                                       activation=tf.nn.relu)  # out size: 12x12x16
    conv2 = tf.compat.v1.layers.conv2d(inputs=conv1, filters=16, kernel_size=[5, 5], padding="valid",
                                       activation=tf.nn.relu)  # out size: 8x8x16
    conv3 = tf.compat.v1.layers.conv2d(inputs=conv2, filters=32, kernel_size=[5, 5], padding="valid",
                                       activation=tf.nn.relu)  # out size: 4x4x32
    conv4 = tf.compat.v1.layers.conv2d(inputs=conv3, filters=32, kernel_size=[4, 4], padding="valid",
                                       activation=tf.nn.relu)  # out size: 1x1x32

    # Features
    features = tf.reshape(conv4, shape=[-1, 32], name="features")

    # 8 neurons for 8 classes
    estimated = tf.compat.v1.layers.dense(inputs=features, units=params.nclasses, activation=None)
    estimated_label = tf.argmax(estimated, 1, name="prediction")

    return estimated, estimated_label
# Create the TensorFlow graph
with tf.Graph().as_default():
# Placeholders
x = tf.placeholder(tf.float32, [None, None, None, 4], name="x")
y = tf.placeholder(tf.int32 , [None, None, None, 1], name="y")
lr = tf.placeholder_with_default(tf.constant(0.0002, dtype=tf.float32, shape=[]),
shape=[], name="lr")
# Output
y_estimated, y_label = myModel(x)
# Loss function
cost = tf.losses.sparse_softmax_cross_entropy(labels=tf.reshape(y, [-1, 1]),
logits=tf.reshape(y_estimated, [-1, params.nclasses]))
# Optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=lr, name="optimizer").minimize(cost)