diff --git a/GUnit.py b/GUnit.py
new file mode 100644
index 0000000000000000000000000000000000000000..4259dca86fb9b21126a323f89716067750a49a4c
--- /dev/null
+++ b/GUnit.py
@@ -0,0 +1,83 @@
+import tensorflow as tf
+from tensorflow.python.ops.rnn_cell_impl import RNNCell  # "tensorflow.nn.rnn_cell" is not an importable module path in TF 1.x
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import variable_scope as vs
+from tensorflow.python.ops import init_ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.util import nest
+from tensorflow.python.ops import nn_ops
+from functions import CNN
+
+_BIAS_G3 = "bias_gate_3"
+_BIAS_G2 = "bias_gate_2"
+_BIAS_G1 = "bias_gate_1"
+
+_WEIGHTS_G1 = "kernel_gate_1"
+_WEIGHTS_G1H = "kernel_gate_1_h"
+
+_WEIGHTS_G2 = "kernel_gate_2"
+_WEIGHTS_G2H = "kernel_gate_2_h"
+
+_WEIGHTS_G3 = "kernel_gate_3"
+_WEIGHTS_G3H = "kernel_gate_3_h"
+
+
+_BIAS_FC0 = "bias_fc0"
+_WEIGHTS_FC0 = "kernel_fc0"
+
+_BIAS_FC1 = "bias_fc1"
+_WEIGHTS_FC1 = "kernel_fc1"
+
+_BIAS_FC2 = "bias_fc2"
+_WEIGHTS_FC2 = "kernel_fc2"
+
+
+def getW(name, dim1, dim2, init, dtype):
+    return vs.get_variable(name, [dim1, dim2], dtype=dtype, initializer=init)
+
+def getB(name, dim, init, dtype):
+    return vs.get_variable(name, [dim], dtype=dtype, initializer=init)
+
+
+class GUnit(RNNCell):
+    """Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078)."""
+    def __init__(self,
+        num_units,
+        drop,
+        is_training,
+        n_timestamps,
+        activation=None,
+        reuse=None,
+        kernel_initializer=None,
+        bias_initializer=None):
+        super(GUnit, self).__init__(_reuse=reuse)
+        self._num_units = num_units
+        self._drop = drop
+        self._is_training = is_training
+        self._activation = activation or math_ops.tanh
+        self._kernel_initializer = kernel_initializer
+        self._bias_initializer = bias_initializer
+        self._n_timestamps = n_timestamps
+
+
+    @property
+    def state_size(self):
+        return self._num_units
+
+    @property
+    def output_size(self):
+        return self._num_units
+
+
+    def call(self, inputs, state):
+        # The recurrent state is ignored: the per-step input is split into
+        # n_timestamps chunks along the feature axis, stacked into a
+        # (batch, n_timestamps, features) tensor and passed through the 1D CNN.
+        new_inputs = tf.split(inputs, self._n_timestamps, axis=1)
+        new_inputs = tf.stack(new_inputs, axis=1)
+        res = CNN(new_inputs, self._num_units, self._drop, self._is_training)
+        return res, res
diff --git a/Tassel.py b/Tassel.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ef44624b6fcd291bbebabac6f8f994243b4b40e
--- /dev/null
+++ b/Tassel.py
@@ -0,0 +1,230 @@
+import sys
+import os
+import numpy as np
+import math
+import time
+import random
+from sklearn.metrics import accuracy_score, f1_score
+from sklearn.utils import shuffle
+import tensorflow as tf
+from tensorflow.contrib import rnn
+from GUnit import GUnit
+
+
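+# Soft attention over the per-timestep outputs: a one-layer tanh projection scores each
+# timestep, the scores of padded positions are zeroed through `mask`, and the
+# softmax-weighted sum of the outputs is returned together with the attention weights.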
+def attention(outputs_list, nunits, attention_size, seq_length, batch_size, mask):
+	outputs = tf.stack(outputs_list, axis=1)
+	W_omega = tf.Variable(tf.random_normal([nunits, attention_size], stddev=0.1))
+	b_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
+	u_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
+
+	v = tf.tanh(tf.tensordot(outputs, W_omega, axes=1) + b_omega)
+	vu = tf.tensordot(v, u_omega, axes=1)   # (B,T) shape
+	vu = vu * mask  # zero the scores of padded timestamps (variable-length sequences)
+	alphas = tf.nn.softmax(vu)              # (B,T) shape also
+	output = tf.reduce_sum(outputs * tf.expand_dims(alphas, -1), 1)
+	output = tf.reshape(output, [-1, nunits])
+	return output, alphas
+
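+# Batched inference on `ts_data_S2` with dropout disabled; returns the weighted F1-score
+# against `label_test` and optionally prints the detailed scores.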
+def checkTest(ts_data_S2, batchsz, label_test, toPrint, data_limit, bins, nclasses):
+	tot_pred = []
+	tot_sm = None
+	iterations = ts_data_S2.shape[0] // batchsz
+
+	if ts_data_S2.shape[0] % batchsz != 0:
+		iterations += 1
+
+	for ibatch in range(iterations):
+		batch_rnn_x_S2, batch_limit = getBatch(ts_data_S2, data_limit, ibatch, batchsz)
+		batch_mask = np.zeros((batch_limit.shape[0],bins))
+		for idx, val in enumerate(batch_limit):
+			for i in range(val):
+				batch_mask[idx,i] = 1.0
+		pred_temp = sess.run(testPrediction,feed_dict={
+													   x_data:batch_rnn_x_S2,
+													   dropOut:0.,
+													   is_training_ph:False,
+													   seq_length:batch_limit,
+													   batch_size:len(batch_limit),
+													   mask:batch_mask
+													})
+		del batch_rnn_x_S2
+		del batch_mask
+		for el in pred_temp:
+			tot_pred.append( el )
+
+	if toPrint:
+		print ("PREDICTION")
+		print ("TEST F-Measure: %f" % f1_score(label_test, tot_pred, average='weighted'))
+		print (f1_score(label_test, tot_pred, average=None))
+		print ("TEST Accuracy: %f" % accuracy_score(label_test, tot_pred))
+
+	return f1_score(label_test, tot_pred, average='weighted')
+
+def getBatch(X, Y, i, batch_size):
+    start_id = i*batch_size
+    end_id = min( (i+1) * batch_size, X.shape[0])
+    batch_x = X[start_id:end_id]
+    batch_y = Y[start_id:end_id]
+    return batch_x, batch_y
+
+
+
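+# Length of each padded series: index of the first timestamp whose band values sum to 0,
+# or the full number of timestamps when there is no padding.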
+def getSL(data):
+	sl = []
+	for el in data:
+		s = np.sum(el,axis=1)
+		idx = np.where(s == 0)
+		idx = idx[0]
+		if len(idx) == 0:
+			sl.append(len(s))
+		else:
+			sl.append(idx[0])
+	return np.array(sl)
+
+
+splits = sys.argv[1]
+prefix = sys.argv[2]
+n_timestamps = int(sys.argv[3])
+nunits = int(sys.argv[4]) #1024
+output_dir_models = sys.argv[5]
+
+trainDataFile = sys.argv[6]
+validDataFile = sys.argv[7]
+trainLabelFile = sys.argv[8]
+validLabelFile = sys.argv[9]
+
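+# Illustrative invocation (the concrete values and file names below are only examples):
+#   python Tassel.py 0 exp0 4 512 models \
+#       train_X.npy valid_X.npy train_y.npy valid_y.npy
+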
+if not os.path.exists(prefix):
+    os.makedirs(prefix)
+
+train_data = np.load(trainDataFile)
+valid_data = np.load(validDataFile)
+
+train_label = np.load(trainLabelFile)
+valid_label = np.load(validLabelFile)
+
+#GET SEQUENCE LENGTH FOR EACH TIME SERIES.
+#By convention, a timestamp whose band values sum to 0 is a padded element.
+train_sl = getSL(train_data)
+valid_sl = getSL(valid_data)
+
+nfeat = train_data.shape[2]
+bins = train_data.shape[1]
+nclasses = len(np.unique(train_label))
+train_y = tf.keras.utils.to_categorical(train_label)
+
+
+#############################################################
+
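+# Graph inputs: x_data holds the padded series (batch, bins, nfeat), mask flags the valid
+# timestamps, and seq_length carries the true lengths for dynamic_rnn. Note that the
+# learning_rate placeholder is declared but the optimizer below uses a fixed 1e-4.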
+x_data = tf.placeholder("float",[None, bins, nfeat ],name="x_data")
+mask = tf.placeholder("float",[None, bins],name="mask")
+
+seq_length = tf.placeholder("float",[None],name="limits")
+batch_size = tf.placeholder("int32",[],name="batch_size")
+y = tf.placeholder("float",[None,nclasses],name="y")
+
+learning_rate = tf.placeholder(tf.float32, shape=[], name="learning_rate")
+is_training_ph = tf.placeholder(tf.bool, shape=(), name="is_training")
+dropOut = tf.placeholder(tf.float32, shape=(), name="drop_rate")
+
+sess = tf.InteractiveSession()
+
+
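+# Encoder and heads: dynamic_rnn applies the GUnit CNN "cell" at every timestep; attention
+# pools the per-timestep features, which feed an auxiliary linear classifier (aux_cl) and
+# the main head (two Dense + batch-norm blocks followed by temp_pred).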
+cell = GUnit(nunits, dropOut, is_training_ph, n_timestamps)
+outputs, _ = tf.nn.dynamic_rnn( cell, x_data, sequence_length=seq_length, dtype=tf.float32 )
+
+final_list_fw = tf.unstack(outputs,axis=1)
+set_feat, alphas_b = attention(final_list_fw, nunits, nunits//2, seq_length, batch_size, mask)
+aux_cl = tf.layers.dense(set_feat, nclasses, activation=None)
+
+alphas_b = tf.identity(alphas_b, name="alphas")
+set_feat = tf.identity(set_feat, name="features")
+
+feat = tf.keras.layers.Dense(min(512, nunits//2), activation='relu')(set_feat)
+feat = tf.layers.batch_normalization(feat)
+
+feat = tf.keras.layers.Dense(min(512, nunits//2), activation='relu')(feat)
+feat = tf.layers.batch_normalization(feat)
+
+temp_pred = tf.layers.dense(feat, nclasses, activation=None)
+
+
+with tf.variable_scope("pred_env"):
+	pred_tot = tf.nn.softmax( temp_pred )
+	testPrediction = tf.argmax(pred_tot, 1, name="prediction")
+	correct = tf.equal(tf.argmax(pred_tot,1),tf.argmax(y,1))
+	accuracy = tf.reduce_mean(tf.cast(correct,tf.float64))
+
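+# Total cost: cross-entropy of the main classifier plus 0.5 times the cross-entropy of the
+# auxiliary classifier computed on the attention features.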
+with tf.variable_scope("cost"):
+	loss = tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=temp_pred)
+	loss_aux = tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=aux_cl)
+	cost = tf.reduce_mean(loss) + (.5 * tf.reduce_mean(loss_aux) )
+
+train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost)
+
+
+###################### END OF COMPUTATIONAL GRAPH ################################
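+# Data augmentation: randomly permutes the valid (non-padded) timestamps of each series
+# and re-pads with zeros up to max_len.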
+def randomHorizontalShift(train_data, train_data_limit, max_len):
+	new_data = []
+	for idx, row in enumerate(train_data):
+		_, ndim = row.shape
+		limit = train_data_limit[idx]
+		temp = row[0:limit,:]
+		temp = shuffle(temp)
+		toappend = np.zeros((max_len-limit,ndim))
+		temp = np.concatenate((temp,toappend ),axis=0)
+		new_data.append(temp)
+	return np.array(new_data)
+
+
+tf.global_variables_initializer().run()
+saver = tf.train.Saver()
+
+batchsz = 32
+hm_epochs = 5000
+#output_dir_models = prefix+"/BoP_CNN_SM_"+nclust
+
+iterations = train_data.shape[0] // batchsz
+
+if train_data.shape[0] % batchsz != 0:
+    iterations+=1
+
+best_f1 = sys.float_info.min
+
+print ("n iterations: %d" % iterations)
+
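+# Training loop: every epoch the training series are shuffled and their valid timestamps
+# re-permuted, mini-batches are run with dropout 0.4, and the checkpoint with the best
+# weighted validation F1-score is kept.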
+for e in range(hm_epochs):
+	start = time.time()
+	lossi = 0
+	accS = 0
+	tot_alphas = None
+	train_data,  train_sl, train_y = shuffle(train_data, train_sl, train_y)
+	train_data = randomHorizontalShift(train_data, train_sl, bins)
+
+	for ibatch in range(iterations):
+		batch_limit, _ = getBatch(train_sl, train_sl, ibatch, batchsz)
+		batch_mask = np.zeros((batch_limit.shape[0],bins))
+		for idx, val in enumerate(batch_limit):
+			batch_mask[idx,0:val] = 1.0
+		batch_x, batch_y = getBatch(train_data, train_y, ibatch, batchsz)
+		acc,_,loss, t_pred, alphas_val = sess.run([accuracy, train_op ,cost, testPrediction, alphas_b],feed_dict={ x_data:batch_x,
+														y:batch_y,
+														dropOut:0.4,
+														is_training_ph:True,
+														seq_length:batch_limit,
+														batch_size:len(batch_limit),
+														mask:batch_mask
+														})
+		lossi+=loss
+		accS+=acc
+		del batch_x
+		del batch_y
+		done = time.time()
+		elapsed = done - start
+	print ("Epoch: ",e," Train loss:",lossi/iterations," | accuracy:",accS/iterations, " | time: ",elapsed	)
+	c_loss = lossi/iterations
+	val_f1 = checkTest(valid_data, 1024, valid_label, False, valid_sl, bins, nclasses)
+	if val_f1 > best_f1:
+		save_path = saver.save(sess, output_dir_models+"/model_"+str(splits))
+		checkTest(valid_data, 1024, valid_label, True, valid_sl,bins, nclasses)
+		print("Model saved in path: %s" % save_path)
+		best_f1 = val_f1
diff --git a/functions.py b/functions.py
new file mode 100644
index 0000000000000000000000000000000000000000..464be11ca6a37aa731d1f6b065cf37ddca8ecd41
--- /dev/null
+++ b/functions.py
@@ -0,0 +1,52 @@
+import sys
+import time
+import os
+import tensorflow as tf
+print (tf.__version__)
+import numpy as np
+from sklearn.metrics import accuracy_score
+from sklearn.metrics import f1_score
+from sklearn.preprocessing import LabelEncoder
+from sklearn.utils import shuffle
+from tensorflow.contrib import rnn
+from tensorflow.contrib.rnn import GRUCell, static_rnn
+
+#CNN1D
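+# Stacked 1D convolutions: four kernel-3 blocks with n_filters//4 channels, then a strided
+# kernel-3 block and three more blocks (kernels 3, 1, 1) with n_filters//2 channels, each
+# followed by batch normalization and dropout; the last two blocks are concatenated and
+# globally average-pooled, yielding n_filters features per sample.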
+def CNN(x, n_filters, dropOut, is_training_ph):
+	conv1 = tf.keras.layers.Conv1D(filters=n_filters//4, kernel_size=3, activation="relu")(x)
+	conv1 = tf.layers.batch_normalization(conv1)
+	conv1 = tf.layers.dropout(conv1, rate= dropOut, training=is_training_ph)
+
+	conv2 = tf.keras.layers.Conv1D(filters=n_filters//4, kernel_size=3, activation="relu")(conv1)
+	conv2 = tf.layers.batch_normalization(conv2)
+	conv2 = tf.layers.dropout(conv2, rate= dropOut, training=is_training_ph)
+
+	conv3 = tf.keras.layers.Conv1D(filters=n_filters//4, kernel_size=3, activation="relu")(conv2)
+	conv3 = tf.layers.batch_normalization(conv3)
+	conv3 = tf.layers.dropout(conv3, rate= dropOut, training=is_training_ph)
+
+	conv4 = tf.keras.layers.Conv1D(filters=n_filters//4, kernel_size=3, activation="relu")(conv3)
+	conv4 = tf.layers.batch_normalization(conv4)
+	conv4 = tf.layers.dropout(conv4, rate= dropOut, training=is_training_ph)
+
+	conv5 = tf.keras.layers.Conv1D(filters=n_filters//2, strides=2, kernel_size=3, activation="relu")(conv4)
+	conv5 = tf.layers.batch_normalization(conv5)
+	conv5 = tf.layers.dropout(conv5, rate= dropOut, training=is_training_ph)
+
+	conv6 = tf.keras.layers.Conv1D(filters=n_filters//2, kernel_size=3, activation="relu")(conv5)
+	conv6 = tf.layers.batch_normalization(conv6)
+	conv6 = tf.layers.dropout(conv6, rate= dropOut, training=is_training_ph)
+
+	conv7 = tf.keras.layers.Conv1D(filters=n_filters//2, kernel_size=1, activation="relu")(conv6)
+	conv7 = tf.layers.batch_normalization(conv7)
+	conv7 = tf.layers.dropout(conv7, rate= dropOut, training=is_training_ph)
+
+	conv8 = tf.keras.layers.Conv1D(filters=n_filters//2, kernel_size=1, activation="relu")(conv7)
+	conv8 = tf.layers.batch_normalization(conv8)
+	conv8 = tf.layers.dropout(conv8, rate= dropOut, training=is_training_ph)
+
+	conv8 = tf.concat((conv8, conv7), axis=2)
+	pool = tf.keras.layers.GlobalAveragePooling1D()(conv8)
+	return pool