Feret Jean-Baptiste / biodivMapR / Commits

Commit a2175a63
authored Jul 05, 2019 by De Boissieu Florian
fix part devtools::check and remove NLPCA (commented in code)
parent 53d17265
Changes: 10 files
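Some context on the toolchain this commit answers to: R CMD check (driven here through devtools, per the commit message) flags invalid License fields, undeclared imports, and library()/require() calls inside package functions, while roxygen2 regenerates NAMESPACE and the man/*.Rd files from the #' tags in the R sources. A minimal sketch of that loop (the local path is illustrative, not part of the repository):

# regenerate NAMESPACE and man/ from roxygen tags, then run the checks
library(devtools)
document("path/to/biodivMapR")   # picks up the #' @importFrom tags added below
check("path/to/biodivMapR")      # surfaces the NOTEs/WARNINGs this commit fixes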
DESCRIPTION
...
@@ -8,7 +8,7 @@ Description: this packages allows processing image data based on the method desc
     It expects an image file as input, with a specific data format.
     ENVI HDR image with BIL interleave required
 Encoding: UTF-8
-License: GPL3
+License: GPL-3
 LazyData: true
 Imports:
     dissUtils,
...
@@ -23,6 +23,7 @@ Imports:
     rgdal,
     R.utils,
     snow,
+    stringr,
     tools,
     vegan,
     zip
...
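A note on the License field: "GPL3" is not a license specification R recognizes, so R CMD check flags it. Writing R Extensions accepts canonical forms such as:

License: GPL-3
License: GPL (>= 3)

which is presumably why the field was rewritten above.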
LICENSE.txt → LICENSE
File moved
NAMESPACE
...
@@ -22,5 +22,7 @@ importFrom(future.apply,future_lapply)
 importFrom(labdsv,pco)
 importFrom(matlab,padarray)
 importFrom(matrixStats,rowSds)
+importFrom(raster,writeRaster)
 importFrom(rgdal,readOGR)
 importFrom(snow,splitRows)
+importFrom(stringr,str_count)
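These importFrom() directives are generated, not hand-edited: in a roxygen2 workflow the NAMESPACE file is rebuilt by devtools::document() from the #' @importFrom tags in the R sources, which is why the same two imports appear as roxygen tags in the hunks below. A sketch of the correspondence (assuming the standard roxygen2 setup):

# in an R source file:
#' @importFrom stringr str_count
# after devtools::document(), NAMESPACE gains the line:
#   importFrom(stringr,str_count)
# and str_count() is then usable without the stringr:: prefix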
R/Lib_ImageProcess.R
...
@@ -997,9 +997,10 @@ Where.To.Write.Kernel <- function(HDR.SS, HDR.SSD, nbPieces, SE.Size) {
 # @param headerFpath Path of the hdr file
 #
 # @return
+#' @importFrom stringr str_count
 write.ENVI.header <- function(header, headerFpath) {
   h <- lapply(header, function(x) {
-    if (length(x) > 1 || (is.character(x) && stringr::str_count(x, "\\w+") > 1)) {
+    if (length(x) > 1 || (is.character(x) && str_count(x, "\\w+") > 1)) {
       x <- paste0("{", paste(x, collapse = ","), "}")
     }
     # convert last numerics
...
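To see what the rewritten condition does: ENVI header values holding several tokens (a vector, or a multi-word string) must be wrapped in braces. A standalone illustration of the branch above, using only stringr (the helper name is made up for this example):

library(stringr)
wrap_envi_value <- function(x) {  # illustrative helper, not a package function
  if (length(x) > 1 || (is.character(x) && str_count(x, "\\w+") > 1)) {
    x <- paste0("{", paste(x, collapse = ","), "}")
  }
  x
}
wrap_envi_value(c(400, 500, 600))   # "{400,500,600}"
wrap_envi_value("ENVI Standard")    # "{ENVI Standard}": two words, so wrapped
wrap_envi_value("bil")              # unchanged: single token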
R/Lib_MapBetaDiversity.R
...
@@ -129,7 +129,6 @@ Get.Sunlit.Pixels <- function(ImPathSunlit, MinSun) {
 #' @importFrom future.apply future_lapply
 Compute.NMDS <- function(MatBCdist) {
   nbiterNMDS <- 4
-  library(doParallel)
   if (Sys.info()["sysname"] == "Windows") {
     nbCoresNMDS <- 2
   } else if (Sys.info()["sysname"] == "Linux") {
...
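Removing the library(doParallel) call is a typical devtools::check fix: attaching a package inside a function alters the caller's search path and earns a check NOTE; the clean pattern is to declare the dependency in DESCRIPTION and call it through its namespace. A hedged sketch of that pattern (a hypothetical helper, not the package's code):

#' @importFrom doParallel registerDoParallel
setup_cluster <- function(nbCores) {
  cl <- parallel::makeCluster(nbCores)  # parallel ships with base R
  doParallel::registerDoParallel(cl)    # namespaced call instead of library()
  cl
}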
R/Lib_PerformPCA.R
...
@@ -14,7 +14,7 @@
 #' @param ImNames Path and name of the images to be processed
 #' @param Output.Dir output directory
 #' @param Continuum.Removal boolean: should continuum removal be applied?
-#' @param TypePCA Type of PCA (PCA, SPCA, NLPCA...)
+#' @param TypePCA Type of PCA: "PCA" or "SPCA"
 #' @param FilterPCA boolean. If TRUE 2nd filtering based on PCA
 #' @param Excluded.WL boolean. Water Vapor Absorption domains (in nanometers). Can also be used to exclude specific domains
 #' @param NbIter numeric. Number of iterations to estimate diversity from the raster.
...
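The tightened @param text matches the code changes below, which disable everything except the linear variants. For orientation (my gloss, not the package's wording): "SPCA" here appears to mean PCA on standardized data, i.e. correlations rather than covariances, which base R can illustrate directly:

# illustrative contrast between the two remaining TypePCA options
pca_cov  <- prcomp(USArrests, center = TRUE, scale. = FALSE)  # "PCA": covariance
pca_corr <- prcomp(USArrests, center = TRUE, scale. = TRUE)   # "SPCA": correlation
summary(pca_corr)  # proportion of variance per component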
@@ -61,12 +61,12 @@ Perform.PCA.Image <- function(ImPath, ImPathShade, Output.Dir, Continuum.Removal
   print("perform PCA#1 on the subset image")
   if (TypePCA == "PCA" | TypePCA == "SPCA") {
     PCA.model <- pca(DataSubset, TypePCA)
-  } else if (TypePCA == "NLPCA") {
-    print("performing NL-PCA with autoencoder")
-    print("Make sure you properly installed and defined python environment if using this functionality")
-    tic()
-    PCA.model <- nlpca(DataSubset)
-    toc()
+  # } else if (TypePCA == "NLPCA") {
+  #   print("performing NL-PCA with autoencoder")
+  #   print("Make sure you properly installed and defined python environment if using this functionality")
+  #   tic()
+  #   PCA.model <- nlpca(DataSubset)
+  #   toc()
   }
   # if PCA based filtering:
...
@@ -111,11 +111,11 @@ Perform.PCA.Image <- function(ImPath, ImPathShade, Output.Dir, Continuum.Removal
   print("perform PCA#2 on the subset image")
   if (TypePCA == "PCA" | TypePCA == "SPCA") {
     PCA.model <- pca(DataSubset, TypePCA)
-  } else if (TypePCA == "NLPCA") {
-    print("performing NL-PCA with autoencoder")
-    tic()
-    PCA.model <- nlpca(DataSubset)
-    toc()
+  # } else if (TypePCA == "NLPCA") {
+  #   print("performing NL-PCA with autoencoder")
+  #   tic()
+  #   PCA.model <- nlpca(DataSubset)
+  #   toc()
   }
 }
 # Number of PCs computed and written in the PCA file: 30 if hyperspectral
...
@@ -242,24 +242,24 @@ Filter.PCA <- function(ImPath, HDR, ImPathShade, Shade.Update, Spectral, CR, PCA
   # Apply PCA
   if (TypePCA == "PCA" | TypePCA == "SPCA") {
     Image.Chunk <- t(t(PCA.model$eiV[, PCsel]) %*% t(Center.Reduce(Image.Chunk, PCA.model$mu, PCA.model$scale)))
-  } else if (TypePCA == "NLPCA") {
-    # apply a transformation to the dataset
-    Image.Chunk <- apply(Image.Chunk, 2, minmax, mode = "apply", MinX = PCA.model$MinVal, MaxX = PCA.model$MaxVal)
-    Image.Chunk2 <- matrix(NA, nrow = length(Image.Chunk[[1]]$data), ncol = length(Image.Chunk))
-    for (ii in 1:length(Image.Chunk)) {
-      Image.Chunk2[, ii] <- matrix(Image.Chunk[[ii]]$data, ncol = 1)
-    }
-    Image.Chunk <- Image.Chunk2
-    rm(Image.Chunk2)
-    intermediate_layer_model <- keras_model(inputs = PCA.model$Model$input, outputs = get_layer(PCA.model$Model, "bottleneck")$output)
-    # # use multithread to apply autoencoder...
-    # plan(multiprocess, workers = nbCPU) ## Parallelize using four cores
-    # Image.Chunk = splitRows(Image.Chunk, nbCPU)
-    # Image.Chunk = future_lapply(Image.Chunk,FUN = predict.from.NLPCA,model = intermediate_layer_model,future.scheduling = 1.0)
-    # plan(sequential)
-    # Image.Chunk = rbind(Image.Chunk)
-    Image.Chunk <- predict(intermediate_layer_model, Image.Chunk)
+  # } else if (TypePCA == "NLPCA") {
+  #   # apply a transformation to the dataset
+  #   Image.Chunk <- apply(Image.Chunk, 2, minmax, mode = "apply", MinX = PCA.model$MinVal, MaxX = PCA.model$MaxVal)
+  #   Image.Chunk2 <- matrix(NA, nrow = length(Image.Chunk[[1]]$data), ncol = length(Image.Chunk))
+  #   for (ii in 1:length(Image.Chunk)) {
+  #     Image.Chunk2[, ii] <- matrix(Image.Chunk[[ii]]$data, ncol = 1)
+  #   }
+  #   Image.Chunk <- Image.Chunk2
+  #   rm(Image.Chunk2)
+  #   intermediate_layer_model <- keras_model(inputs = PCA.model$Model$input, outputs = get_layer(PCA.model$Model, "bottleneck")$output)
+  #   # # use multithread to apply autoencoder...
+  #   # plan(multiprocess, workers = nbCPU) ## Parallelize using four cores
+  #   # Image.Chunk = splitRows(Image.Chunk, nbCPU)
+  #   # Image.Chunk = future_lapply(Image.Chunk,FUN = predict.from.NLPCA,model = intermediate_layer_model,future.scheduling = 1.0)
+  #   # plan(sequential)
+  #   # Image.Chunk = rbind(Image.Chunk)
+  #   Image.Chunk <- predict(intermediate_layer_model, Image.Chunk)
   }
   # get PCA of the group of line and rearrange the data to write it correctly in the output file
...
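The one-liner in the "PCA"/"SPCA" branch is easier to read once unrolled: Center.Reduce() centers (and, for SPCA, scales) each band, and the double transpose is just a projection of the pixel matrix onto the selected eigenvectors. A self-contained sketch with stand-in data (Center.Reduce, eiV, and PCsel are the package's names; everything else below is invented for illustration):

X   <- matrix(rnorm(20), nrow = 5)               # 5 pixels x 4 bands of fake data
mu  <- colMeans(X)
sc  <- apply(X, 2, sd)
Xcr <- sweep(sweep(X, 2, mu, "-"), 2, sc, "/")   # what Center.Reduce computes (SPCA case)
eiV <- eigen(cov(Xcr))$vectors                   # eigenvectors, cf. PCA.model$eiV
PCsel <- 1:2
scores <- t(t(eiV[, PCsel]) %*% t(Xcr))          # the expression from the diff
all.equal(scores, Xcr %*% eiV[, PCsel])          # TRUE: it is plain X %*% V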
@@ -395,24 +395,24 @@ Create.PCA.Image <- function(ImPath, ImPathShade, PCA.Path, PCA.model, Spectral,
   # Apply PCA
   if (TypePCA == "PCA" | TypePCA == "SPCA") {
     Image.Chunk <- t(t(PCA.model$eiV[, 1:Nb.PCs]) %*% t(Center.Reduce(Image.Chunk, PCA.model$mu, PCA.model$scale)))
-  } else if (TypePCA == "NLPCA") {
-    # apply a transformation to the dataset
-    Image.Chunk <- apply(Image.Chunk, 2, minmax, mode = "apply", MinX = PCA.model$MinVal, MaxX = PCA.model$MaxVal)
-    Image.Chunk2 <- matrix(NA, nrow = length(Image.Chunk[[1]]$data), ncol = length(Image.Chunk))
-    for (ii in 1:length(Image.Chunk)) {
-      Image.Chunk2[, ii] <- matrix(Image.Chunk[[ii]]$data, ncol = 1)
-    }
-    Image.Chunk <- Image.Chunk2
-    rm(Image.Chunk2)
-    intermediate_layer_model <- keras_model(inputs = PCA.model$Model$input, outputs = get_layer(PCA.model$Model, "bottleneck")$output)
-    # # use multithread to apply autoencoder...
-    # plan(multiprocess, workers = nbCPU) ## Parallelize using four cores
-    # Image.Chunk = splitRows(Image.Chunk, nbCPU)
-    # Image.Chunk = future_lapply(Image.Chunk,FUN = predict.from.NLPCA,model = intermediate_layer_model,future.scheduling = 1.0)
-    # plan(sequential)
-    # Image.Chunk = rbind(Image.Chunk)
-    Image.Chunk <- predict(intermediate_layer_model, Image.Chunk)
+  # } else if (TypePCA == "NLPCA") {
+  #   # apply a transformation to the dataset
+  #   Image.Chunk <- apply(Image.Chunk, 2, minmax, mode = "apply", MinX = PCA.model$MinVal, MaxX = PCA.model$MaxVal)
+  #   Image.Chunk2 <- matrix(NA, nrow = length(Image.Chunk[[1]]$data), ncol = length(Image.Chunk))
+  #   for (ii in 1:length(Image.Chunk)) {
+  #     Image.Chunk2[, ii] <- matrix(Image.Chunk[[ii]]$data, ncol = 1)
+  #   }
+  #   Image.Chunk <- Image.Chunk2
+  #   rm(Image.Chunk2)
+  #   intermediate_layer_model <- keras_model(inputs = PCA.model$Model$input, outputs = get_layer(PCA.model$Model, "bottleneck")$output)
+  #   # # use multithread to apply autoencoder...
+  #   # plan(multiprocess, workers = nbCPU) ## Parallelize using four cores
+  #   # Image.Chunk = splitRows(Image.Chunk, nbCPU)
+  #   # Image.Chunk = future_lapply(Image.Chunk,FUN = predict.from.NLPCA,model = intermediate_layer_model,future.scheduling = 1.0)
+  #   # plan(sequential)
+  #   # Image.Chunk = rbind(Image.Chunk)
+  #   Image.Chunk <- predict(intermediate_layer_model, Image.Chunk)
   }
   # get PCA of the group of line and rearrange the data to write it correctly in the output file
...
@@ -444,97 +444,97 @@ Create.PCA.Image <- function(ImPath, ImPathShade, PCA.Path, PCA.model, Spectral,
 }
-# Function to perform NLPCA
-#
-# @param DataSubset matrix to apply NLPCA on
-#
-# @return list of PCA parameters (PCs from X, mean, eigenvectors and values)
-nlpca <- function(DataSubset) {
-  # put between 0 and 1
-  x_train <- apply(DataSubset, 2, minmax, mode = "define")
-  Subset <- list()
-  nb.Vars <- ncol(DataSubset)
-  Subset$DataSubset <- matrix(NA, nrow = nrow(DataSubset), ncol = nb.Vars)
-  for (i in 1:nb.Vars) {
-    Subset$DataSubset[, i] <- matrix(x_train[[i]]$data, ncol = 1)
-    Subset$MinVal[i] <- x_train[[i]]$MinX
-    Subset$MaxVal[i] <- x_train[[i]]$MaxX
-  }
-  # define number of pixels to be used for NL-PCA
-  nbSubsamples.NLPCA <- 100000
-  # autoencoder in keras
-  # set training data
-  x_train <- as.matrix(Subset$DataSubset)
-  # set model
-  model <- keras_model_sequential()
-  if (nb.Vars < 12) {
-    model %>%
-      layer_dense(units = 6, activation = "tanh", input_shape = ncol(x_train)) %>%
-      layer_dense(units = 3, activation = "tanh", name = "bottleneck") %>%
-      layer_dense(units = 6, activation = "tanh") %>%
-      layer_dense(units = ncol(x_train))
-  } else if (nb.Vars > 100) {
-    model %>%
-      layer_dense(units = 100, activation = "tanh", input_shape = ncol(x_train)) %>%
-      layer_dense(units = 80, activation = "tanh") %>%
-      layer_dense(units = 60, activation = "tanh") %>%
-      layer_dense(units = 40, activation = "tanh") %>%
-      layer_dense(units = 20, activation = "tanh", name = "bottleneck") %>%
-      layer_dense(units = 40, activation = "tanh") %>%
-      layer_dense(units = 60, activation = "tanh") %>%
-      layer_dense(units = 80, activation = "tanh") %>%
-      layer_dense(units = 100, activation = "tanh") %>%
-      layer_dense(units = ncol(x_train))
-  } else if (nb.Vars <= 100) {
-    model %>%
-      layer_dense(units = 80, activation = "tanh", input_shape = ncol(x_train)) %>%
-      layer_dense(units = 60, activation = "tanh") %>%
-      layer_dense(units = 40, activation = "tanh") %>%
-      layer_dense(units = 20, activation = "tanh", name = "bottleneck") %>%
-      layer_dense(units = 40, activation = "tanh") %>%
-      layer_dense(units = 60, activation = "tanh") %>%
-      layer_dense(units = 80, activation = "tanh") %>%
-      layer_dense(units = ncol(x_train))
-  }
-  # view model layers
-  summary(model)
-  # compile model
-  model %>% keras::compile(
-    loss = "mean_squared_error",
-    optimizer = "adam"
-  )
-  # fit model
-  Sampling <- sample(nrow(x_train))
-  model %>% keras::fit(
-    x = x_train[Sampling[1:nbSubsamples.NLPCA], ],
-    y = x_train[Sampling[1:nbSubsamples.NLPCA], ],
-    epochs = 100,
-    verbose = 0
-  )
-  # # evaluate the performance of the model
-  # mse.ae2 <- keras::evaluate(model, x_train, x_train)
-  # mse.ae2
-  # extract the bottleneck layer
-  intermediate_layer_model <- keras_model(inputs = model$input, outputs = get_layer(model, "bottleneck")$output)
-  intermediate_output <- predict(intermediate_layer_model, x_train)
-  my_list <- list("Model" = model, "dataPCA" = intermediate_output, "MinVal" = Subset$MinVal, "MaxVal" = Subset$MaxVal)
-  return(my_list)
-}
-# applies NLPCA to data
-#
-# @param Data matrix to apply NLPCA on
-# @param model.AutoEncod model defined previously
-#
-# @return Image.Chunk after NLPCA
-#' @importFrom future.apply future_lapply
-predict.from.NLPCA <- function(Data, model.AutoEncod) {
-  Image.Chunk <- future_lapply(Data, FUN = predict.from.NLPCA, model = model.AutoEncod, future.scheduling = 1.0)
-  return(Image.Chunk)
-}
+# # Function to perform NLPCA
+# #
+# # @param DataSubset matrix to apply NLPCA on
+# #
+# # @return list of PCA parameters (PCs from X, mean, eigenvectors and values)
+# nlpca <- function(DataSubset) {
+#   # put between 0 and 1
+#   x_train <- apply(DataSubset, 2, minmax, mode = "define")
+#   Subset <- list()
+#   nb.Vars <- ncol(DataSubset)
+#   Subset$DataSubset <- matrix(NA, nrow = nrow(DataSubset), ncol = nb.Vars)
+#   for (i in 1:nb.Vars) {
+#     Subset$DataSubset[, i] <- matrix(x_train[[i]]$data, ncol = 1)
+#     Subset$MinVal[i] <- x_train[[i]]$MinX
+#     Subset$MaxVal[i] <- x_train[[i]]$MaxX
+#   }
+#   # define number of pixels to be used for NL-PCA
+#   nbSubsamples.NLPCA <- 100000
+#   # autoencoder in keras
+#   # set training data
+#   x_train <- as.matrix(Subset$DataSubset)
+#   # set model
+#   model <- keras_model_sequential()
+#   if (nb.Vars < 12) {
+#     model %>%
+#       layer_dense(units = 6, activation = "tanh", input_shape = ncol(x_train)) %>%
+#       layer_dense(units = 3, activation = "tanh", name = "bottleneck") %>%
+#       layer_dense(units = 6, activation = "tanh") %>%
+#       layer_dense(units = ncol(x_train))
+#   } else if (nb.Vars > 100) {
+#     model %>%
+#       layer_dense(units = 100, activation = "tanh", input_shape = ncol(x_train)) %>%
+#       layer_dense(units = 80, activation = "tanh") %>%
+#       layer_dense(units = 60, activation = "tanh") %>%
+#       layer_dense(units = 40, activation = "tanh") %>%
+#       layer_dense(units = 20, activation = "tanh", name = "bottleneck") %>%
+#       layer_dense(units = 40, activation = "tanh") %>%
+#       layer_dense(units = 60, activation = "tanh") %>%
+#       layer_dense(units = 80, activation = "tanh") %>%
+#       layer_dense(units = 100, activation = "tanh") %>%
+#       layer_dense(units = ncol(x_train))
+#   } else if (nb.Vars <= 100) {
+#     model %>%
+#       layer_dense(units = 80, activation = "tanh", input_shape = ncol(x_train)) %>%
+#       layer_dense(units = 60, activation = "tanh") %>%
+#       layer_dense(units = 40, activation = "tanh") %>%
+#       layer_dense(units = 20, activation = "tanh", name = "bottleneck") %>%
+#       layer_dense(units = 40, activation = "tanh") %>%
+#       layer_dense(units = 60, activation = "tanh") %>%
+#       layer_dense(units = 80, activation = "tanh") %>%
+#       layer_dense(units = ncol(x_train))
+#   }
+#   # view model layers
+#   summary(model)
+#   # compile model
+#   model %>% keras::compile(
+#     loss = "mean_squared_error",
+#     optimizer = "adam"
+#   )
+#   # fit model
+#   Sampling <- sample(nrow(x_train))
+#   model %>% keras::fit(
+#     x = x_train[Sampling[1:nbSubsamples.NLPCA], ],
+#     y = x_train[Sampling[1:nbSubsamples.NLPCA], ],
+#     epochs = 100,
+#     verbose = 0
+#   )
+#   # # evaluate the performance of the model
+#   # mse.ae2 <- keras::evaluate(model, x_train, x_train)
+#   # mse.ae2
+#   # extract the bottleneck layer
+#   intermediate_layer_model <- keras_model(inputs = model$input, outputs = get_layer(model, "bottleneck")$output)
+#   intermediate_output <- predict(intermediate_layer_model, x_train)
+#   my_list <- list("Model" = model, "dataPCA" = intermediate_output, "MinVal" = Subset$MinVal, "MaxVal" = Subset$MaxVal)
+#   return(my_list)
+# }
+# # applies NLPCA to data
+# #
+# # @param Data matrix to apply NLPCA on
+# # @param model.AutoEncod model defined previously
+# #
+# # @return Image.Chunk after NLPCA
+# # @importFrom future.apply future_lapply
+# predict.from.NLPCA <- function(Data, model.AutoEncod) {
+#   Image.Chunk <- future_lapply(Data, FUN = predict.from.NLPCA, model = model.AutoEncod, future.scheduling = 1.0)
+#   return(Image.Chunk)
+# }
 # Function to perform PCA on a matrix
 #
...
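Although the NLPCA path is now disabled, the pattern it used is worth keeping legible: train a symmetric autoencoder, then read activations out of the narrow "bottleneck" layer as nonlinear components. A minimal sketch with the keras R package (toy sizes, untied to the package's data; assumes a working TensorFlow backend):

library(keras)
x <- matrix(runif(16 * 100), ncol = 16)            # 100 samples, 16 variables in [0, 1]
model <- keras_model_sequential() %>%
  layer_dense(units = 8, activation = "tanh", input_shape = 16) %>%
  layer_dense(units = 3, activation = "tanh", name = "bottleneck") %>%
  layer_dense(units = 8, activation = "tanh") %>%
  layer_dense(units = 16)
model %>% keras::compile(loss = "mean_squared_error", optimizer = "adam")
model %>% keras::fit(x = x, y = x, epochs = 5, verbose = 0)  # learn to reconstruct the input
encoder <- keras_model(inputs = model$input,
                       outputs = get_layer(model, "bottleneck")$output)
codes <- predict(encoder, x)                       # 100 x 3 matrix of nonlinear "PCs"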
@@ -620,7 +620,7 @@ Define.Pixels.Per.Iter <- function(ImNames, NbIter = NbIter) {
 #' @param Output.Dir output directory
 #' @param Input.Image.File path for image to be processed
 #' @param PCA.Files path of PCA files
-#' @param TypePCA Type of PCA (PCA, SPCA, NLPCA...)
+#' @param TypePCA Type of PCA: "PCA" or "SPCA"
 #'
 #' @return nothing
 #' @export
...
R/Lib_Validation_biodivMapR.R
...
@@ -313,10 +313,10 @@ Get.Diversity.From.Plots = function(Raster, Plots,NbClusters = 50,Name.Plot = FA
 # @param quiet
 # @return NULL
 #' @importFrom rgdal readOGR
+#' @importFrom raster writeRaster
 gdal_polygonizeR = function(x, outshape = NULL, gdalformat = 'ESRI Shapefile',
                             pypath = NULL, readpoly = TRUE, quiet = TRUE) {
   if (readpoly == TRUE) require(rgdal)
   if (is.null(pypath)) {
     pypath <- Sys.which('gdal_polygonize.py')
   }
...
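The pypath lookup above relies on Sys.which(), which resolves an executable through the user's PATH and returns an empty string when nothing is found, so downstream code can test for availability:

# how the gdal_polygonize.py lookup behaves (base R, no extra packages)
p <- Sys.which("gdal_polygonize.py")
nzchar(p)   # TRUE if the script is on PATH, FALSE otherwise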
@@ -335,7 +335,6 @@ gdal_polygonizeR = function(x, outshape=NULL, gdalformat = 'ESRI Shapefile',
 #             sep='.')[f.exists])), call.=FALSE)
   } else outshape <- tempfile()
   if (is(x, 'Raster')) {
-    require(raster)
     writeRaster(x, {f <- tempfile(fileext = '.tif')})
     rastpath <- normalizePath(f)
   } else if (is.character(x)) {
...
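One subtlety in this hunk: writeRaster(x, {f <- tempfile(fileext = '.tif')}) evaluates the braced block to obtain the filename while also binding it to f for the normalizePath() call that follows. A more explicit equivalent (a sketch only; writeRaster() comes from the raster package and needs a Raster* object for x):

f <- tempfile(fileext = ".tif")
# raster::writeRaster(x, f)            # as in the function body above
rastpath <- normalizePath(f, mustWork = FALSE)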
TODO LIST.txt → TODO.md
File moved
man/Perform.PCA.Image.Rd
...
@@ -13,7 +13,7 @@ Perform.PCA.Image(ImPath, ImPathShade, Output.Dir,
 \item{Continuum.Removal}{boolean: should continuum removal be applied?}
-\item{TypePCA}{Type of PCA (PCA, SPCA, NLPCA...)}
+\item{TypePCA}{Type of PCA: "PCA" or "SPCA"}
 \item{FilterPCA}{boolean. If TRUE 2nd filtering based on PCA}
...
man/Select.Components.Rd
...
@@ -14,7 +14,7 @@ Select.Components(Input.Image.File, Output.Dir, PCA.Files,
 \item{PCA.Files}{path of PCA files}
-\item{TypePCA}{Type of PCA (PCA, SPCA, NLPCA...)}
+\item{TypePCA}{Type of PCA: "PCA" or "SPCA"}
 }
 \value{
 nothing
...