@@ -13,11 +13,11 @@ Nioro 2018 : 33 plots
## Standardisation
Data were standardised per band by subtracting the mean value and dividing by the standard deviation.
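A minimal sketch of this per-band standardisation (NumPy; the helper name `standardise_per_band` and the `(plots, timestamps, bands)` array layout are illustrative assumptions, not taken from the repository code):

```python
import numpy as np

def standardise_per_band(X):
    """Standardise each band over all plots and timestamps."""
    mean = X.mean(axis=(0, 1), keepdims=True)  # one mean per band
    std = X.std(axis=(0, 1), keepdims=True)    # one standard deviation per band
    return (X - mean) / std
```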
| | plots | rad | opt |
| ------------- | ----- |---- |---- |
| niakhar 2017 | 35 | 15 | 14 |
| niakhar 2018 | 48 | 16 | 18 |
| nioro 2018 | 33 | 16 | 16 |
| | plots | rad | opt | thrs |
| ------------- | ----- |---- |---- | ---- |
| niakhar 2017 | 35 | 15 | 14 | |
| niakhar 2018 | 48 | 16 | 18 | |
| nioro 2018 | 33 | 16 | 16 | |
### niakhar 2017
| opt | radar
......
import sys
import time
import os
import tensorflow as tf
import numpy as np
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
from sklearn import linear_model
def get_batch(array, i, batch_size):
    # Return the i-th mini-batch (the last batch may be smaller).
    start_id = i * batch_size
    end_id = min((i + 1) * batch_size, array.shape[0])
    return array[start_id:end_id]
def format_X_3D(lst_X, n_timestamps=37):
    # Stack the per-site arrays and reshape to (samples, timestamps, bands).
    X = None
    for tmp in lst_X:
        X = tmp if X is None else np.vstack((X, tmp))
    X = X.reshape(X.shape[0], n_timestamps, -1)
    print(X.shape)
    return X
def format_X_2D(lst_X, n_timestamps=37):
    # Stack the per-site arrays, keeping the flat (samples, features) layout.
    X = None
    for tmp in lst_X:
        X = tmp if X is None else np.vstack((X, tmp))
    print(X.shape)
    return X
def format_y(lst_y, target, scale_by=1000):
    # Concatenate the chosen target column of each site and rescale it.
    y = None
    for tmp in lst_y:
        y = tmp[:, target] if y is None else np.hstack((y, tmp[:, target]))
    print(y.shape)
    return y / scale_by
def conv1d(X, n_filters, k_size, name, padding_mode="same", strides=False, activate=True, bias=True):
    # Conv1D -> batch norm -> (optional) ReLU; strides=True halves the temporal dimension.
    stride = 2 if strides else 1
    conv = tf.keras.layers.Conv1D(filters=n_filters, kernel_size=k_size, strides=stride,
                                  padding=padding_mode, use_bias=bias, name=name)(X)
    conv = tf.keras.layers.BatchNormalization(name="%s_batchnorm" % name)(conv)
    if activate:
        conv = tf.nn.relu(conv, name="%s_relu" % name)
    return conv
def cnn1d_autoencoder(X, n_filters, dropOut):
    # Encoder: three strided blocks compress the (37, 2) series down to 5 time steps.
    # (Dropout on the intermediate conv blocks is left disabled.)
    with tf.variable_scope("cnn1d"):
        conv1 = conv1d(X, n_filters, 3, name="conv1")
        print(conv1.get_shape())
        conv2 = conv1d(conv1, n_filters, 3, name="conv2", strides=True)
        print(conv2.get_shape())
        conv3 = conv1d(conv2, n_filters * 2, 3, name="conv3")
        print(conv3.get_shape())
        conv4 = conv1d(conv3, n_filters * 2, 3, name="conv4", strides=True)
        print(conv4.get_shape())
        conv5 = conv1d(conv4, n_filters * 4, 3, name="conv5")
        print(conv5.get_shape())
        conv6 = conv1d(conv5, n_filters * 4, 3, name="conv6", strides=True)
        print(conv6.get_shape())
        # Latent representation used downstream by the linear regression.
        flatten = tf.layers.flatten(conv6)
        flatten = tf.nn.dropout(flatten, keep_prob=1 - dropOut)
        print(flatten.get_shape())
        # Decoder (sketch): mirror the encoder with upsampling + convolutions back to the
        # (37, 2) input shape so the reconstruction loss in run() can be computed.
        up7 = tf.keras.layers.UpSampling1D()(conv6)                 # 5 -> 10 time steps
        conv7 = conv1d(up7, n_filters * 2, 3, name="conv7")
        up8 = tf.keras.layers.UpSampling1D()(conv7)                 # 10 -> 20 time steps
        conv8 = conv1d(up8, n_filters, 3, name="conv8")
        up9 = tf.keras.layers.UpSampling1D()(conv8)                 # 20 -> 40 time steps
        cropped = tf.keras.layers.Cropping1D((0, 3))(up9)           # 40 -> 37 time steps
        recon = tf.keras.layers.Conv1D(int(X.get_shape()[-1]), 3, padding="same", name="recon")(cropped)
        print(recon.get_shape())
    return flatten, recon
def stacked_autoencoder(X, n_units, dropOut):
    # One dense encoding layer and its reconstruction (for layer-wise pre-training).
    fc1 = tf.keras.layers.Dense(n_units, activation=tf.nn.relu)(X)
    fc1 = tf.nn.dropout(fc1, keep_prob=1 - dropOut)
    fc2 = tf.keras.layers.Dense(int(X.get_shape()[1]), activation=tf.nn.relu)(fc1)
    # fc2 = tf.nn.dropout(fc2, keep_prob=1-dropOut)
    return fc1, fc2
def encoder_decoder(X, n_units, dropOut):
    # Fully connected autoencoder with a single-unit bottleneck.
    fc1 = tf.keras.layers.Dense(n_units, activation=tf.nn.relu)(X)
    fc1 = tf.nn.dropout(fc1, keep_prob=1 - dropOut)
    fc2 = tf.keras.layers.Dense(n_units // 2, activation=tf.nn.relu)(fc1)
    fc2 = tf.nn.dropout(fc2, keep_prob=1 - dropOut)
    fc3 = tf.keras.layers.Dense(n_units // 4, activation=tf.nn.relu)(fc2)
    fc3 = tf.nn.dropout(fc3, keep_prob=1 - dropOut)
    bottleneck = tf.keras.layers.Dense(1, activation=tf.nn.relu, name='bottleneck')(fc3)
    # bottleneck = tf.nn.dropout(bottleneck, keep_prob=1-dropOut)
    fc5 = tf.keras.layers.Dense(n_units // 4, activation=tf.nn.relu)(bottleneck)
    fc5 = tf.nn.dropout(fc5, keep_prob=1 - dropOut)
    fc6 = tf.keras.layers.Dense(n_units // 2, activation=tf.nn.relu)(fc5)
    fc6 = tf.nn.dropout(fc6, keep_prob=1 - dropOut)
    fc7 = tf.keras.layers.Dense(n_units, activation=tf.nn.relu)(fc6)
    fc7 = tf.nn.dropout(fc7, keep_prob=1 - dropOut)
    fc8 = tf.keras.layers.Dense(int(X.get_shape()[1]), activation=tf.nn.relu)(fc7)
    fc8 = tf.nn.dropout(fc8, keep_prob=1 - dropOut)
    return bottleneck, fc8
def run(train_radar_X, train_opt_X, train_indices_X, train_y, n_units, batch_size, n_epochs, lr, drop):
    X = tf.compat.v1.placeholder(tf.float32, shape=(None, 37, 2), name='X')
    y = tf.compat.v1.placeholder(tf.float32, shape=(None), name='y')
    dropOut = tf.compat.v1.placeholder(tf.float32, shape=(), name="drop_rate")
    # Alternative architectures kept for reference:
    # bottleneck, logits = encoder_decoder(X, n_units, dropOut)
    # h1, X_recon = stacked_autoencoder(X, n_units, dropOut)
    # h2, h1_recon = stacked_autoencoder(h1, n_units // 2, dropOut)
    # h3, h2_recon = stacked_autoencoder(h2, n_units // 4, dropOut)
    # h4, h3_recon = stacked_autoencoder(h3, n_units // 8, dropOut)
    # h5, h4_recon = stacked_autoencoder(h4, n_units // 16, dropOut)
    # h6, h5_recon = stacked_autoencoder(h5, n_units // 32, dropOut)
    latent, recon = cnn1d_autoencoder(X, n_units, dropOut)
    with tf.variable_scope("pred"):
        pred = tf.identity(recon, name="prediction")
    with tf.variable_scope("cost"):
        # Reconstruction loss between the input series and the decoder output.
        cost = tf.reduce_mean(tf.math.squared_difference(X, pred))
        # cost = tf.reduce_mean(tf.losses.absolute_difference(y, pred))
        # cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=X, logits=logits))
        # Layer-wise reconstruction losses of the stacked autoencoder variant:
        # cost = tf.reduce_mean(tf.math.squared_difference(X, X_recon))
        # cost += tf.reduce_mean(tf.math.squared_difference(h1, h1_recon))
        # cost += tf.reduce_mean(tf.math.squared_difference(h2, h2_recon))
        # cost += tf.reduce_mean(tf.math.squared_difference(h3, h3_recon))
        # cost += tf.reduce_mean(tf.math.squared_difference(h4, h4_recon))
        # cost += tf.reduce_mean(tf.math.squared_difference(h5, h5_recon))
    optimizer = tf.train.AdamOptimizer(learning_rate=lr).minimize(cost)
    ##############################################################################
    n_batch = int(train_radar_X.shape[0] / batch_size)
    if train_radar_X.shape[0] % batch_size != 0:
        n_batch += 1
    print("n_batch: %d" % n_batch)
    init = tf.global_variables_initializer()
    with tf.Session() as session:
        session.run(init)
        for epoch in range(1, n_epochs + 1):
            start = time.time()
            epoch_loss = 0
            train_radar_X, train_opt_X, train_indices_X, train_y = shuffle(
                train_radar_X, train_opt_X, train_indices_X, train_y, random_state=0)
            total_feat = None
            for batch in range(n_batch):
                batch_X_rad = get_batch(train_radar_X, batch, batch_size)
                batch_X_opt = get_batch(train_opt_X, batch, batch_size)
                batch_X_indices = get_batch(train_indices_X, batch, batch_size)
                batch_y = get_batch(train_y, batch, batch_size)
                # Only the radar series feed the autoencoder; the latent features are
                # collected to fit the linear regression at the end of the epoch.
                loss, latent_feat, _ = session.run([cost, latent, optimizer],
                                                   feed_dict={X: batch_X_rad,
                                                              y: batch_y,
                                                              dropOut: drop})
                del batch_X_rad, batch_X_opt, batch_X_indices, batch_y
                if total_feat is None:
                    total_feat = latent_feat
                else:
                    total_feat = np.vstack((total_feat, latent_feat))
                epoch_loss += loss
            stop = time.time()
            elapsed = stop - start
            print("Epoch ", epoch, " Train loss:", epoch_loss / n_batch, "| Time: ", elapsed)
            #############################################################################
            # Fit a linear regression on the latent features to track how predictive they are.
            r2 = linear_reg(total_feat, train_y)
            print("TRAIN R2: %f" % r2)
def linear_reg(x, y):
    # R2 of an ordinary least-squares fit evaluated on the training features themselves.
    lm = linear_model.LinearRegression()
    lm.fit(x, y)
    y_pred = lm.predict(x)
    return r2_score(y, y_pred)
if __name__ == '__main__':
    # Reading data: radar, optical, index and yield arrays for the three sites.
    train_radar_X1 = np.load(sys.argv[1])
    train_opt_X1 = np.load(sys.argv[2])
    train_indices_X1 = np.load(sys.argv[3])
    train_y1 = np.load(sys.argv[4])
    train_radar_X2 = np.load(sys.argv[5])
    train_opt_X2 = np.load(sys.argv[6])
    train_indices_X2 = np.load(sys.argv[7])
    train_y2 = np.load(sys.argv[8])
    train_radar_X3 = np.load(sys.argv[9])
    train_opt_X3 = np.load(sys.argv[10])
    train_indices_X3 = np.load(sys.argv[11])
    train_y3 = np.load(sys.argv[12])
    sys.stdout.flush()
    # Formatting: only the first two sites are used for training here.
    train_radar_X = format_X_3D([train_radar_X1, train_radar_X2])
    train_opt_X = format_X_3D([train_opt_X1, train_opt_X2])
    train_indices_X = format_X_3D([train_indices_X1, train_indices_X2])
    train_y = format_y([train_y1, train_y2], target=-1)
    # Run Model
    n_units = 128
    batch_size = 4
    n_epochs = 1000
    lr = 1E-4
    drop = 0.4
    run(train_radar_X, train_opt_X, train_indices_X, train_y, n_units, batch_size, n_epochs, lr, drop)
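# Invocation sketch (file names below are hypothetical; the script expects twelve .npy
# paths: radar, optical, index and yield arrays for each of the three sites, in order):
#   python <this_script>.py \
#       site1_radar.npy site1_opt.npy site1_indices.npy site1_y.npy \
#       site2_radar.npy site2_opt.npy site2_indices.npy site2_y.npy \
#       site3_radar.npy site3_opt.npy site3_indices.npy site3_y.npy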
import sys
import time
import os
import tensorflow as tf
import numpy as np
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
def format_to_cnn(X):
    # Reshape flattened pixels into a square (dim x dim) patch per plot.
    dim = int(np.sqrt(X.shape[1]))
    X = X.reshape(X.shape[0], dim, dim, X.shape[2], X.shape[3])
    print(X.shape)
    return X
def format_label(y, target=3, scale_by=1000):
    # Select the target column and rescale it.
    y = y[:, target] / scale_by
    print(y.shape)
    return y
def get_batch(array, i, batch_size):
    # Return the i-th mini-batch (the last batch may be smaller).
    start_id = i * batch_size
    end_id = min((i + 1) * batch_size, array.shape[0])
    return array[start_id:end_id]
def cnn2d(X):
    # Two convolution blocks (conv -> batch norm -> ReLU) with one max-pooling in between.
    W1 = tf.compat.v1.get_variable("W1", [5, 5, 2, 32], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
    W2 = tf.compat.v1.get_variable("W2", [3, 3, 32, 64], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
    b1 = tf.compat.v1.get_variable("b1", initializer=tf.compat.v1.random.normal([32]))
    b2 = tf.compat.v1.get_variable("b2", initializer=tf.compat.v1.random.normal([64]))
    conv1 = tf.nn.conv2d(X, W1, strides=[1, 1, 1, 1], padding='VALID')
    conv1 = tf.nn.bias_add(conv1, b1)
    conv1 = tf.keras.layers.BatchNormalization(name="bn1")(conv1)
    conv1 = tf.nn.relu(conv1)
    print(conv1.get_shape())
    max_pool = tf.compat.v1.layers.max_pooling2d(conv1, 2, strides=2, padding='VALID')
    print(max_pool.get_shape())
    conv2 = tf.nn.conv2d(max_pool, W2, strides=[1, 1, 1, 1], padding='VALID')
    conv2 = tf.nn.bias_add(conv2, b2)
    conv2 = tf.keras.layers.BatchNormalization(name="bn2")(conv2)
    conv2 = tf.nn.relu(conv2)
    print(conv2.get_shape())
    flatten = tf.keras.layers.Flatten()(conv2)
    print(flatten.get_shape())
    return flatten
def add_fc(features, units, dropOut):
    # Two dense layers with dropout, then a single linear output unit.
    fc1 = tf.keras.layers.Dense(units, activation=tf.nn.relu)(features)
    fc1 = tf.nn.dropout(fc1, keep_prob=1 - dropOut)
    fc2 = tf.keras.layers.Dense(units // 2, activation=tf.nn.relu)(fc1)
    fc2 = tf.nn.dropout(fc2, keep_prob=1 - dropOut)
    pred = tf.keras.layers.Dense(1)(fc2)
    return pred
def run(train_radar_X, train_opt_X, train_indices_X, train_y, fc_units, batch_size, n_epochs, lr, drop):
    dim = train_radar_X.shape[1]
    X = tf.compat.v1.placeholder(tf.float32, shape=(None, dim, dim, None), name='X')
    y = tf.compat.v1.placeholder(tf.float32, shape=(None), name='y')
    dropOut = tf.compat.v1.placeholder(tf.float32, shape=(), name="drop_rate")
    feat = cnn2d(X)
    with tf.variable_scope("pred"):
        logits = add_fc(feat, fc_units, dropOut)
        pred = tf.identity(logits, name="prediction")
    with tf.variable_scope("cost"):
        # Squeeze the (batch, 1) prediction so the squared difference is element-wise with y.
        cost = tf.reduce_mean(tf.math.squared_difference(y, tf.squeeze(pred, axis=-1)))
    optimizer = tf.train.AdamOptimizer(learning_rate=lr).minimize(cost)
    ##############################################################################
    n_batch = int(train_radar_X.shape[0] / batch_size)
    if train_radar_X.shape[0] % batch_size != 0:
        n_batch += 1
    print("n_batch: %d" % n_batch)
    # saver = tf.train.Saver()
    # best_r2 = sys.float_info.min
    init = tf.global_variables_initializer()
    with tf.Session() as session:
        session.run(init)
        for epoch in range(1, n_epochs + 1):
            start = time.time()
            epoch_loss = 0
            train_radar_X, train_opt_X, train_indices_X, train_y = shuffle(
                train_radar_X, train_opt_X, train_indices_X, train_y, random_state=0)
            total_pred = None
            for batch in range(n_batch):
                batch_X_rad = get_batch(train_radar_X, batch, batch_size)
                batch_X_opt = get_batch(train_opt_X, batch, batch_size)
                batch_X_indices = get_batch(train_indices_X, batch, batch_size)
                batch_y = get_batch(train_y, batch, batch_size)
                loss, batch_pred, _ = session.run([cost, pred, optimizer],
                                                  feed_dict={X: batch_X_rad,
                                                             y: batch_y,
                                                             dropOut: drop})
                del batch_X_rad, batch_X_opt, batch_X_indices, batch_y
                if total_pred is None:
                    total_pred = batch_pred.reshape(batch_pred.shape[0])
                else:
                    total_pred = np.hstack((total_pred, batch_pred.reshape(batch_pred.shape[0])))
                epoch_loss += loss
            stop = time.time()
            elapsed = stop - start
            print("Epoch ", epoch, " Train loss:", epoch_loss / n_batch, "| Time: ", elapsed)
            #############################################################################
            # test_batch = int(test_radar_X.shape[0] / (batch_size))
            # if test_radar_X.shape[0] % (batch_size) != 0:
            #     test_batch += 1
            # total_pred = None
            # for ibatch in range(test_batch):
            #     test_batch_X_rad = get_batch(test_radar_X, ibatch, batch_size)
            #     test_batch_X_opt = get_batch(test_opt_X, ibatch, batch_size)
            #     test_batch_X_indices = get_batch(test_indices_X, ibatch, batch_size)
            #     batch_pred = session.run(pred, feed_dict={X_rad: test_batch_X_rad,
            #                                               X_opt: test_batch_X_opt,
            #                                               X_indices: test_batch_X_indices,
            #                                               dropOut: 0.})
            #     del test_batch_X_rad, test_batch_X_opt, test_batch_X_indices