from keras.layers import Input, Dense
from keras.models import Model
from keras import optimizers
from sklearn.preprocessing import MinMaxScaler
import keras

import numpy as np
import sys
import os
from random import randint
import math

# Optional: cap per-process GPU memory (TensorFlow 1.x / standalone Keras API);
# uncomment the three lines below to enable it.
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
#config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.5
#set_session(tf.Session(config=config))


def deepSSAEMulti(n_dim, n_hidden1, n_hidden2, n_classes):
	"""Build a semi-supervised stacked autoencoder (SSAE).

	Returns two models that share the same encoder layers (and weights):
	- autoencoder: input -> reconstruction (unsupervised, trained on all data)
	- ssautoencoder: input -> [reconstruction, class posterior]
	  (trained on the labeled subset only)
	"""
	input_layer = Input(shape=(n_dim,))
	encoded = Dense(n_hidden1, activation='relu')(input_layer)
	encoded = Dense(n_hidden2, activation='relu', name="low_dim_features")(encoded)
	decoded = Dense(n_hidden1, activation='relu')(encoded)
	decoded = Dense(n_dim, activation='sigmoid')(decoded)

	classifier = Dense(n_classes, activation='softmax')(encoded)

	# Two separate optimizer instances: each compiled model keeps its own state.
	rmsPropOpt = optimizers.RMSprop(lr=0.0005)
	rmsPropOpt1 = optimizers.RMSprop(lr=0.0005)
	autoencoder = Model(inputs=[input_layer], outputs=[decoded])
	autoencoder.compile(optimizer=rmsPropOpt, loss=['mse'])

	ssautoencoder = Model(inputs=[input_layer], outputs=[decoded, classifier])
	ssautoencoder.compile(optimizer=rmsPropOpt1, loss=['mse', 'categorical_crossentropy'], loss_weights=[1., 1.])
	return [autoencoder, ssautoencoder]
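# Illustrative usage (the sizes here are arbitrary examples, not values
# taken from this script):
#   ae, ssae = deepSSAEMulti(n_dim=100, n_hidden1=64, n_hidden2=16, n_classes=4)
#   ae.summary()    # 100 -> 64 -> 16 -> 64 -> 100
#   ssae.summary()  # same encoder, plus a 4-way softmax head on the 16-d code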


def feature_extraction(model, data, layer_name):
	"""Return the activations of the layer called `layer_name` for `data`."""
	feat_extr = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
	return feat_extr.predict(data)
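# Illustrative usage (variable names are assumptions): with the models above,
#   codes = feature_extraction(ae, X, "low_dim_features")
#   # codes.shape == (X.shape[0], n_hidden2)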

def learn_SingleReprSS(X_tot, idx_train, Y):
	"""Train one SSAE and return the low-dimensional embedding of all samples."""
	n_classes = len(np.unique(Y))
	idx_train = idx_train.astype("int")
	X_train = X_tot[idx_train]
	Y_train = Y[idx_train]
	encoded_Y_train = keras.utils.to_categorical(Y_train, n_classes)
	n_row, n_col = X_tot.shape

	# Hidden-layer sizes are drawn at random to diversify the ensemble:
	# n_hidden1 in [ceil(n_col/2), n_col-1] and
	# n_hidden2 in [ceil(n_col/4), ceil(n_col/2)-1]
	# (e.g. for n_col = 100: n_hidden1 in [50, 99], n_hidden2 in [25, 49]).
	h1_max = n_col - 1
	h1_min = int(math.ceil(n_col * 0.5))
	h2_max = int(math.ceil(n_col * 0.5)) - 1
	h2_min = int(math.ceil(n_col * 0.25))

	n_hidden1 = randint(h1_min, h1_max)
	n_hidden2 = randint(h2_min, h2_max)

	ae, ssae = deepSSAEMulti(n_col, n_hidden1, n_hidden2, n_classes)
	# Alternate one unsupervised epoch on all the data with one semi-supervised
	# epoch (reconstruction + classification) on the labeled subset.
	for i in range(200):
		print("epoch: %d" % i)
		ae.fit(X_tot, X_tot, epochs=1, batch_size=64, shuffle=True, verbose=1)
		ssae.fit(X_train, [X_train, encoded_Y_train], epochs=1, batch_size=8, shuffle=True, verbose=1)
	return feature_extraction(ae, X_tot, "low_dim_features")


def learn_representationSS(X_tot, idx_train, Y, ens_size):
	"""Stack the embeddings of `ens_size` independently trained SSAEs column-wise."""
	intermediate_reprs = np.array([])
	for l in range(ens_size):
		embeddings = learn_SingleReprSS(X_tot, idx_train, Y)
		if intermediate_reprs.size == 0:
			intermediate_reprs = embeddings
		else:
			intermediate_reprs = np.column_stack((intermediate_reprs, embeddings))
	return intermediate_reprs
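# Each run draws its own n_hidden2, so the stacked matrix has n_row rows and
# sum-over-runs-of-n_hidden2 columns. Illustrative check (sizes are assumed):
# with ens_size = 2 and codes of 16 and 20 dimensions,
#   learn_representationSS(X_tot, idx_train, Y, 2).shape == (n_row, 36)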


def normData(data):
	"""Rescale each feature to [0, 1]; matches the sigmoid output of the decoder."""
	scaler = MinMaxScaler()
	return scaler.fit_transform(np.array(data))
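# Illustrative toy example (assumed values): per-column min-max scaling gives
#   normData([[0., 10.], [5., 20.]])  ->  [[0., 0.], [1., 1.]]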


if __name__ == "__main__":
    #Name of the directory in which the data are stored
    directory = sys.argv[1]

    #File in the directory/labels folder with the label information.
    #The file has as many rows as there are labeled examples.
    #Each row holds two values: the position of the labeled example
    #w.r.t. the data file data.npy, and the associated label.
    fileName = sys.argv[2]
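    # Illustrative sketch of how such a label file could be produced
    # (file name and sample count are assumptions, not part of this script):
    #   cls = np.load(directory + "/class.npy")
    #   idx = np.random.choice(len(cls), 50, replace=False)
    #   np.save(directory + "/labels/0_50.npy",
    #           np.column_stack((idx, cls[idx])))  # rows: [position, label]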

    dataset_name = directory+"/data.npy"
    dataset_cl_name = directory+"/class.npy"

    dataset = np.load(dataset_name)
    dataset = normData(dataset)

    dataset_cl = np.load(dataset_cl_name)

    #Size of the ensemble
    ens_size = 30
    dirEmb = "embeddings"
    dir_path = directory+"/"+dirEmb

    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

    #The label file is expected to be named <run_id>_<nsamples>.npy
    fName = fileName.split("/")[-1]
    run_id, nsamples = fName.split(".")[0].split("_")
    outFileName = dir_path+"/"+run_id+"_"+nsamples+".npy"
    if os.path.exists(outFileName):
        print("ALREADY EXISTS %s" % outFileName)
        sys.exit()
    print("CREATING EMBEDDINGS for file %s" % fileName)


    sys.stdout.flush()
    idx_cl = np.load(fileName)
    #First column: positions of the labeled examples in data.npy
    idx_train = idx_cl[:,0]

    new_feat_ssae = learn_representationSS(dataset, idx_train, dataset_cl, ens_size)
    np.save(outFileName, new_feat_ssae)