import sys
import time
import os
import tensorflow as tf
print (tf.__version__)
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import shuffle
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from tensorflow.contrib import rnn
from tensorflow.contrib.rnn import GRUCell, static_rnn

#CNN1D
def CNN(x, n_filters, dropOut, is_training_ph):
	"""1D fully-convolutional feature extractor.

	Stacks eight Conv1D -> batch-norm -> dropout blocks: four with
	n_filters // 4 filters (kernel 3), one strided (stride 2) and one plain
	block with n_filters // 2 filters (kernel 3), then two 1x1 blocks with
	n_filters // 2 filters. The last two blocks are concatenated on the
	channel axis and globally average-pooled.

	Args:
		x: input tensor; assumed (batch, time, channels) as expected by
			Conv1D with default data format — TODO confirm with caller.
		n_filters: total filter budget; layers use n_filters // 4 and
			n_filters // 2 (integer division — see note below).
		dropOut: dropout rate passed to every tf.layers.dropout call.
		is_training_ph: boolean placeholder/tensor enabling dropout only
			during training.

	Returns:
		A (batch, n_filters) tensor: global average pooling over the
		concatenation of the last two conv blocks (n_filters // 2 each).
	"""
	def _conv_block(inp, filters, kernel_size, strides=1):
		# Shared Conv1D -> batch-norm -> dropout pattern used by all 8 layers.
		out = tf.keras.layers.Conv1D(filters=filters, strides=strides,
		                             kernel_size=kernel_size, activation="relu")(inp)
		# NOTE(review): batch_normalization is called without
		# training=is_training_ph (and no UPDATE_OPS handling), so BN runs
		# with its default mode everywhere. Kept as-is because fixing it
		# requires the training loop (outside this file view) to run the
		# update ops — confirm before changing.
		out = tf.layers.batch_normalization(out)
		return tf.layers.dropout(out, rate=dropOut, training=is_training_ph)

	# Fix: integer division. In Python 3, n_filters / 4 is a float, and
	# Conv1D requires an integer filter count.
	quarter = n_filters // 4
	half = n_filters // 2

	net = x
	for _ in range(4):
		net = _conv_block(net, quarter, kernel_size=3)

	net = _conv_block(net, half, kernel_size=3, strides=2)
	net = _conv_block(net, half, kernel_size=3)
	conv7 = _conv_block(net, half, kernel_size=1)
	conv8 = _conv_block(conv7, half, kernel_size=1)

	# Skip-style concatenation of the last two blocks along the channel axis,
	# then collapse the time dimension with global average pooling.
	merged = tf.concat((conv8, conv7), axis=2)
	return tf.keras.layers.GlobalAveragePooling1D()(merged)

def getSL(data):
	"""Compute the effective sequence length of each object in `data`.

	For each 2-D object (components x features), a component whose feature
	sum is 0 is treated as empty/padding. The object's length is the index
	of the first such all-zero component; if no component is empty, the
	full component count is used.

	Args:
		data: iterable of 2-D arrays (components x features).

	Returns:
		np.ndarray of one length per object.
	"""
	lengths = []
	for obj in data:
		comp_sums = np.sum(obj, axis=1)
		# Indices of empty components, already in ascending order.
		zero_rows = np.flatnonzero(comp_sums == 0)
		# First empty component marks the end; otherwise the object is full.
		lengths.append(zero_rows[0] if zero_rows.size else len(comp_sums))
	return np.array(lengths)