diff --git a/REUNION/Bgru.py b/REUNION/Bgru.py
index 1f8a908868c8f483858b91f2d77fbda0bd1750f2..039e53550de5ba750ecab1f548391df48420eb08 100644
--- a/REUNION/Bgru.py
+++ b/REUNION/Bgru.py
@@ -35,34 +35,30 @@ def Bgru(x, nunits, nlayer, timesteps, nclasses, dropout):
 #def getPrediction(x_rnn, x_rnn_b, x_cnn, nunits, nlayer, n_classes, choice):
 def getPrediction(x, nunits, nlayer, n_classes, dropout, is_training):
 	n_timetamps = 34
-	prediction = None
-	features = None
 	features = Bgru( x, nunits, nlayer, n_timetamps, n_classes, dropout )
 	# Trainable parameters
-	print "output",features.get_shape()
+	print "output ",features.get_shape()
 	attention_size = int(nunits)
-	print "units",nunits
+	print "units ",nunits
 	print "att size",attention_size
 	W_omega = tf.Variable(tf.random_normal([nunits*2, attention_size], stddev=0.1))
-	print "womega",W_omega.get_shape()
+	print "womega ",W_omega.get_shape()
 	b_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
 	u_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
-
 	# Applying fully connected layer with non-linear activation to each of the B*T timestamps;
 	# the shape of `v` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
 	v = tf.tanh(tf.tensordot(features, W_omega, axes=1) + b_omega)
-
 	# For each of the timestamps its vector of size A from `v` is reduced with `u` vector
 	vu = tf.tensordot(v, u_omega, axes=1)  # (B,T) shape
-
 	alphas = tf.nn.softmax(vu)  # (B,T) shape also
 	# Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape
 	features = tf.reduce_sum(features * tf.expand_dims(alphas, -1), 1)
-	print "output",features.get_shape()
+	print "output", features.get_shape()
 	features = tf.reshape(features, [-1, nunits*2])
-	print "output",features.get_shape()
+	print "output ",features.get_shape()
 	prediction = tf.layers.dense( features, n_classes, activation=None, name='prediction')
-	print "prediction",prediction.get_shape()
+	print "prediction ",prediction.get_shape()
+	return prediction
 
 
 def getBatch(X, Y, i, batch_size):
diff --git a/REUNION/RFCts.py b/REUNION/RFCts.py
index 9625e2a483e19b77dc357916037f7e00c28f1aeb..1bc9b3012e0ba3dc05bd927afc9446d9e9a2da5a 100644
--- a/REUNION/RFCts.py
+++ b/REUNION/RFCts.py
@@ -44,7 +44,7 @@ predC = clf.predict(test_x)
 
 KAPPA = cohen_kappa_score( test_y, predC )
 accuracy = accuracy_score( test_y, predC )
-fscore = f1_score( test_y, predC, average='micro' )
+fscore = f1_score( test_y, predC, average='weighted' )
 
 var_prec, var_rec, var_fsc, _ = precision_recall_fscore_support( test_y, predC )
 
diff --git a/REUNION/RFCvhsr.py b/REUNION/RFCvhsr.py
index f7c24f6e3cae570a80b368527bf604aed0ee9e68..6fce37377f391e8dd1c2e6b7bd555ebf0885f5b1 100644
--- a/REUNION/RFCvhsr.py
+++ b/REUNION/RFCvhsr.py
@@ -63,7 +63,7 @@ predC = clf.predict(test_x)
 
 KAPPA = cohen_kappa_score( test_y, predC )
 accuracy = accuracy_score( test_y, predC )
-fscore = f1_score( test_y, predC, average='micro' )
+fscore = f1_score( test_y, predC, average='weighted' )
 
 var_prec, var_rec, var_fsc, _ = precision_recall_fscore_support( test_y, predC )