@preprint{tran2023beat,title={One to beat them all: ``RYU'' - a unifying framework for the construction of safe balls},author={Tran, Thu-Le and Elvira, Clément and Dang, Hong-Phuong and Herzet, Cédric},year={2023},hal={hal-04318380},}
Bayes in action in deep learning and dictionary learning
@preprint{arble2022bayes,author={Arbel, Julyan and Dang, Hong-Phuong and Elvira, Clément and Herzet, Cédric and Naulet, Zacharie and Vladimirova, Mariia},title={Bayes in action in deep learning and dictionary learning},year={2022},}
International journal papers (4 entries)
Safe Rules for the Identification of Zeros in the Solutions of the SLOPE Problem
@article{elvira2021safeSLOPE,title={Safe Rules for the Identification of Zeros in the Solutions of the {SLOPE} Problem},author={Elvira, Clément and Herzet, Cédric},year={2023},journal={{SIAM} Journal on Mathematics of Data Science},month=mar,publisher={Society for Industrial {\&} Applied Mathematics ({SIAM})},volume={5},number={1},pages={147--173},hal={hal-03400322},doi={10.1137/21m1457631},}
When does OMP achieve exact recovery with continuous dictionaries?
This paper presents new theoretical results on sparse recovery guarantees for a greedy algorithm, Orthogonal Matching Pursuit (OMP), in the context of continuous parametric dictionaries. Here, the continuous setting means that the dictionary is made up of an infinite (potentially uncountable) number of atoms. In this work, we rely on the Hilbert structure of the observation space to express our recovery results as a property of the kernel defined by the inner product between two atoms. Using a continuous extension of Tropp’s Exact Recovery Condition, we identify two key notions of admissible kernel and admissible support that are sufficient to ensure exact recovery with OMP. We exhibit a family of admissible kernels relying on completely monotone functions for which admissibility holds for any support in the one-dimensional setting. For higher dimensional parameter spaces, an additional notion of axis admissibility is shown to be sufficient to ensure a form of delayed recovery. An additional algebraic condition involving a finite subset of (known) atoms further yields exact recovery guarantees. Finally, a coherence-based viewpoint on these results provides recovery guarantees in terms of a minimum separation assumption.
@article{elvira2021,title={When does {OMP} achieve exact recovery with continuous dictionaries?},author={Elvira, Cl{\'e}ment and Gribonval, R{\'e}mi and Soussen, Charles and Herzet, Cédric},journal={Applied and Computational Harmonic Analysis},volume={51},pages={374--413},year={2021},issn={1063-5203},doi={10.1016/j.acha.2020.12.002},url={http://www.sciencedirect.com/science/article/pii/S1063520320300841},hal={hal-02099464v4},}
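As background for the entry above: a minimal sketch of the OMP greedy loop in Python. It operates on a finite (discretized) dictionary, whereas the paper's analysis is carried out directly in the continuous setting; the function and variable names are ours, for illustration only.

import numpy as np

def omp(A, y, k):
    # Minimal OMP sketch: greedily select k columns of A (assumed unit-norm).
    residual, support = y.copy(), []
    for _ in range(k):
        j = int(np.argmax(np.abs(A.T @ residual)))   # atom most correlated with the residual
        support.append(j)
        A_s = A[:, support]
        x, *_ = np.linalg.lstsq(A_s, y, rcond=None)  # least-squares fit on the current support
        residual = y - A_s @ x                       # residual becomes orthogonal to selected atoms
    return support, x

In the noise-free setting of the paper, exact recovery means that these k iterations select precisely the atoms of the true support.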
Safe Squeezing for Antisparse Coding
Spreading the information over all coefficients of a representation is a desirable property in many applications such as digital communication or machine learning. This so-called antisparse representation can be obtained by solving a convex program involving an ℓ∞-norm penalty combined with a quadratic discrepancy. In this paper, we propose a new methodology, dubbed safe squeezing, to accelerate the computation of antisparse representations. We describe a test that detects saturated entries in the solution of the optimization problem. The contribution of these entries is compacted into a single vector, thus operating a form of dimensionality reduction. We propose two algorithms to solve the resulting lower-dimensional problem. Numerical experiments show the effectiveness of the proposed method in detecting the saturated components of the solution and illustrate the induced computational gains in the resolution of the antisparse problem.
@article{elvira2020_TSP,title={{Safe Squeezing for Antisparse Coding}},author={Elvira, Cl{\'e}ment and Herzet, Cédric},hal={hal-02368134},year={2020},doi={10.1109/tsp.2020.2995192},url={https://doi.org/10.1109/tsp.2020.2995192},publisher={Institute of Electrical and Electronics Engineers ({IEEE})},journal={{IEEE} Transactions on Signal Processing},keywords={antisparse coding ; safe screening ; scaled projected-gradient algorithm ; Frank-Wolfe algorithm},}
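For concreteness, the antisparse coding problem described above admits the following standard formulation (notation ours; λ > 0 weights the ℓ∞ penalty against the quadratic discrepancy):

\[ \min_{x \in \mathbb{R}^n} \; \tfrac{1}{2}\,\|y - A x\|_2^2 + \lambda\,\|x\|_\infty . \]

Safe squeezing detects entries that are saturated at the solution, i.e., indices $i$ such that $|x^\star_i| = \|x^\star\|_\infty$, whose contribution can then be compacted into a single vector.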
Bayesian Antisparse Coding
Sparse representations have proven their efficiency in solving a wide class of inverse problems encountered in signal and image processing. Conversely, enforcing the information to be spread uniformly over representation coefficients exhibits relevant properties in various applications such as robust encoding in digital communications. Antisparse regularization can be naturally expressed through an ℓ∞-norm penalty. This paper derives a probabilistic formulation of such representations. A new probability distribution, referred to as the democratic prior, is first introduced. Its main properties as well as three random variate generators for this distribution are derived. Then this probability distribution is used as a prior to promote antisparsity in a Gaussian linear model, yielding a fully Bayesian formulation of antisparse coding. Two Markov chain Monte Carlo algorithms are proposed to generate samples according to the posterior distribution. The first one is a standard Gibbs sampler. The second one uses Metropolis-Hastings moves that exploit the proximity mapping of the log-posterior distribution. These samples are used to approximate maximum a posteriori and minimum mean square error estimators of both parameters and hyperparameters. Simulations on synthetic data illustrate the performance of the two proposed samplers, for both complete and over-complete dictionaries. All results are compared to the recent deterministic variational FITRA algorithm.
@article{elvira2017_TSP,author={Elvira, Clément and Chainais, Pierre and Dobigeon, Nicolas},journal={IEEE Transactions on Signal Processing},title={Bayesian Antisparse Coding},year={2017},volume={65},number={7},pages={1660-1672},doi={10.1109/TSP.2016.2645543},issn={1053-587X},month=apr,hal={hal-01433706},}
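Up to normalization, the democratic prior mentioned above can be sketched as follows (a minimal form consistent with the abstract, under which the MAP estimate coincides with ℓ∞-penalized least squares; the exact parameterization used in the paper may differ):

\[ p(x) \propto \exp\big(-\lambda \|x\|_\infty\big), \qquad -\log p(x \mid y) = \tfrac{1}{2\sigma^2}\|y - A x\|_2^2 + \lambda \|x\|_\infty + \mathrm{const}. \]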
International conference papers (15 entries)
Safe peeling for l0-regularized least-squares with supplementary material
@inproceedings{GuyardPeeling23,title={{Safe peeling for l0-regularized least-squares with supplementary material}},hal={hal-04118979},author={Guyard, Théo and Monnoyer, Gilles and Elvira, Clément and Herzet, Cédric},booktitle={Accepted at {EUSIPCO}},year={2023},}
Region-free Safe Screening Tests for ℓ1-penalized Convex Problems
@inproceedings{herzet2022Eusipco,title={{Region-free Safe Screening Tests for $\ell_1$-penalized Convex Problems}},author={Herzet, C{\'e}dric and Elvira, Cl{\'e}ment and Dang, Hong-Phuong},url={https://hal-centralesupelec.archives-ouvertes.fr/hal-03806099},booktitle={2022 30th European Signal Processing Conference ({EUSIPCO})},doi={10.23919/eusipco55093.2022.9909532},address={Belgrade, Serbia},year={2022},month=aug,keywords={sparsity ; convex problem ; safe screening},hal={hal-03806099},}
Beyond GAP screening for Lasso by exploiting new dual cutting half-spaces
@inproceedings{Le2022HolderDome,author={Tran, Thu-Le and Elvira, Cl{\'e}ment and Dang, Hong-Phuong and Herzet, C{\'e}dric},title={Beyond {GAP} screening for {Lasso} by exploiting new dual cutting half-spaces},year={2022},booktitle={2022 30th European Signal Processing Conference ({EUSIPCO})},month=aug,hal={hal-03805966},}
Screen & Relax: Accelerating the resolution of Elastic-net by safe identification of the solution support
@inproceedings{guyard2021screen,title={{Screen \& Relax: Accelerating the resolution of Elastic-net by safe identification of the solution support}},author={Guyard, Th{\'e}o and Herzet, C{\'e}dric and Elvira, Cl{\'e}ment},booktitle={ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},pages={5443-5447},year={2022},doi={10.1109/ICASSP43922.2022.9747412},hal={hal-03462191},}
Node-screening tests for l0-penalized Least-Squares problem
Continuous dictionaries meet low-rank tensor approximations
@inproceedings{elvira2020_itwist,title={{Continuous dictionaries meet low-rank tensor approximations}},author={Elvira, Cl{\'e}ment and Cohen, J{\'e}r{\'e}my E and Herzet, C{\'e}dric and Gribonval, R{\'e}mi},url={https://hal.archives-ouvertes.fr/hal-02567115},booktitle={{iTwist 2020 - International Traveling Workshop on Interactions between low-complexity data models and Sensing Techniques}},address={Nantes, France},pages={1-3},year={2020},month=jun,hal={hal-02567115},}
BLASTER: An Off-Grid Method for Blind and Regularized Acoustic Echoes Retrieval
@inproceedings{elvira2020icassp2,title={{BLASTER: An Off-Grid Method for Blind and Regularized Acoustic Echoes Retrieval}},author={Di Carlo, Diego and Elvira, Cl{\'e}ment and Deleforge, Antoine and Bertin, Nancy and Gribonval, Rémi},booktitle={ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},publisher={{IEEE}},year={2020},doi={10.1109/ICASSP40776.2020.9054647},hal={hal-02469901},pages={156-160},}
Short and squeezed: accelerating the computation of antisparse representations with safe squeezing
@inproceedings{elvira2020icassp,title={{Short and squeezed: accelerating the computation of antisparse representations with safe squeezing}},author={Elvira, Cl{\'e}ment and Herzet, Cédric},booktitle={ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},publisher={{IEEE}},year={2020},doi={10.1109/ICASSP40776.2020.9053156},hal={hal-03070344},pages={5615-5619},}
Uniform k-step recovery with CMF dictionaries
We present new theoretical results on sparse recovery guarantees for a greedy algorithm, orthogonal matching pursuit (OMP), in the context of continuous parametric dictionaries, i.e., dictionaries made up of an uncountably infinite number of atoms. We build up a family of dictionaries for which k-step recovery is possible.
@inproceedings{elvira2019spars,title={{Uniform k-step recovery with CMF dictionaries}},author={Elvira, Cl{\'e}ment and Gribonval, R{\'e}mi and Herzet, C{\'e}dric and Soussen, Charles},booktitle={{SPARS 2019 - Signal Processing with Adaptive Sparse Structured Representations}},address={Toulouse, France},pages={1-2},year={2019},month=jul,hal={hal-02157561},}
OMP and continuous dictionaries: is k-step recovery possible?
In this work, we present new theoretical results on sparse recovery guarantees for a greedy algorithm, orthogonal matching pursuit (OMP), in the context of continuous parametric dictionaries, i.e., dictionaries made up of an uncountably infinite number of atoms. We build up a family of dictionaries for which k-step recovery is possible with OMP for 1-dimensional parameters. In higher dimensions, algebraic conditions become necessary and lead us to revisit some well-known k-step discrete analyses. Finally, a toy example illustrates the level of tightness of our sufficient conditions.
@inproceedings{elvira2019icassp,title={{OMP and continuous dictionaries: is k-step recovery possible?}},author={Elvira, Cl{\'e}ment and Gribonval, R{\'e}mi and Soussen, Charles and Herzet, Cédric},url={https://hal.archives-ouvertes.fr/hal-02049486},booktitle={ICASSP 2019 - 2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},address={Brighton, United Kingdom},organization={{IEEE}},publisher={{IEEE}},doi={10.1109/ICASSP.2019.8683617},issn={2379-190X},year={2019},pages={5546-5550},month=may,keywords={sparse representation ; exact recovery ; orthogonal matching pursuit ; continuous dictionaries},hal={hal-02049486},}
Parameter-free Small Variance Asymptotics for Dictionary Learning
@inproceedings{dang2019eusipco,author={Dang, Hong-Phuong and Elvira, Clément},booktitle={2019 27th European Signal Processing Conference (EUSIPCO)},title={Parameter-free Small Variance Asymptotics for Dictionary Learning},year={2019},pages={1-5},doi={10.23919/EUSIPCO.2019.8903025},}
A case of exact recovery with OMP using continuous dictionaries
We present new theoretical results on sparse recovery guarantees for a greedy algorithm, orthogonal matching pursuit (OMP), in the context of continuous dictionaries. Consider a sparse linear combination of atoms from a dictionary parameterized by some real parameters, for example, a combination of shifted versions of a basic waveform as in the context of sparse spike deconvolution. Currently, performance guarantees for greedy algorithms are typically carried out in the discrete setting associated with a grid of atom parameters, and based on, e.g., the coherence of the considered discretized dictionary \cite{tropp2004}. However, such analyses fail to be conclusive for grid-based approaches when the discretization step tends to zero, as the coherence goes to one. Instead, our analysis is directly conducted in the continuous setting. For atoms parameterized by a real parameter that are elements of the (infinite-dimensional) Hilbert space L_2(\mathbb{R}) of square integrable real functions, and such that the inner product between two atoms is the exponential of the negative absolute difference of the corresponding parameters, we show in the noise-free setting that OMP exactly recovers the atom parameters as well as their amplitudes, regardless of the number of distinct atoms. We exhibit a convolutive dictionary of exponentially decaying pulses for which the atoms have an analytic definition while their pairwise inner products have the prescribed form. The established guarantees rely on a proof technique which is the continuous equivalent of Tropp’s Exact Recovery Condition (ERC) \cite{tropp2004}. The proof exploits specific properties of the positive definite kernel between atom parameters defined by the inner product between the corresponding atoms. Future work will aim at characterizing the class of kernels for which such an analysis holds (in particular for higher-dimensional parameters) and the compatibility of the guarantees with dimension reduction techniques such as sketching, which would pave the way to provably good greedy algorithms for compressive statistical learning \cite{Gribonval2017}. In light of the existing links between Tropp’s ERC and recovery guarantees for \ell_1 minimization \cite{tropp2006}, an interesting question is whether these guarantees extend to sparse spike recovery with total variation norm minimization \cite{Duval2015,candes2014}.
@inproceedings{elvira2018CS,title={{A case of exact recovery with OMP using continuous dictionaries}},author={Elvira, Cl{\'e}ment and Gribonval, R{\'e}mi and Herzet, Cédric and Soussen, Charles},booktitle={{CS 2018 - 9th International Conference on Curves and Surfaces}},address={Arcachon, France},year={2018},month=jun,hal={hal-01937532},}
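In symbols, the setting analyzed above reads as follows (notation ours). Atoms $a_\theta \in L_2(\mathbb{R})$ are indexed by a real parameter $\theta$, their pairwise inner products follow a Laplacian kernel, and a continuous analogue of Tropp's ERC over a support $S = \{\theta_1, \dots, \theta_k\}$ gives a sufficient condition for exact recovery:

\[ \langle a_\theta, a_{\theta'} \rangle = e^{-|\theta - \theta'|}, \qquad \sup_{\theta \notin S} \big\| G_S^{-1} g_S(\theta) \big\|_1 < 1, \]

where $G_S$ is the Gram matrix of the support atoms, $(G_S)_{ij} = \langle a_{\theta_i}, a_{\theta_j} \rangle$, and $g_S(\theta)_i = \langle a_{\theta_i}, a_\theta \rangle$. This is simply the discrete ERC of \cite{tropp2004} transposed to the continuous index set; the precise statement in the paper may differ in details.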
Small variance asymptotics and Bayesian nonparametrics for dictionary learning
Bayesian nonparametrics (BNP) is an appealing framework to infer the complexity of a model along with its parameters. To this aim, sampling or variational methods are often used for inference. However, these methods come with numerical disadvantages for large-scale data. An alternative approach is to relax the probabilistic model into a non-probabilistic formulation which yields a scalable algorithm. One limitation of BNP approaches can be the cost of Monte Carlo sampling for inference. Small-variance asymptotics (SVA) paves the way to much cheaper, though approximate, methods for inference by benefiting from a fruitful interaction between Bayesian models and optimization algorithms. In brief, SVA lets the variance of the noise (or residual error) distribution tend to zero in, for instance, the optimization problem corresponding to a MAP estimator with finite noise variance. We propose such an SVA analysis of a BNP dictionary learning (DL) approach that automatically adapts the size of the dictionary or the subspace dimension in an efficient way. Numerical experiments illustrate the efficiency of the proposed method.
@inproceedings{elvira2018eusipco,doi={10.23919/eusipco.2018.8553142},year={2018},month=sep,publisher={{IEEE}},author={Elvira, Clément and Dang, Hong-Phuong and Chainais, Pierre},title={Small variance asymptotics and {Bayesian} nonparametrics for dictionary learning},booktitle={2018 26th European Signal Processing Conference ({EUSIPCO})},hal={hal-01961852},}
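Schematically, the SVA mechanism invoked in the abstract above works as follows (a generic sketch, not the paper's exact derivation). With a Gaussian likelihood of variance σ², the MAP estimate of a dictionary D and codes W solves

\[ \min_{D, W} \; \frac{1}{2\sigma^2} \|Y - D W\|_F^2 \;-\; \log p(D, W). \]

Multiplying by 2σ² and letting σ² → 0, with the prior hyperparameters rescaled so that $-2\sigma^2 \log p(D, W)$ keeps a finite limit (e.g., prior weights of the form $\exp(-\lambda/(2\sigma^2))$ turning into a cost of λ per active component), yields a deterministic objective of the form $\|Y - DW\|_F^2 + \lambda K$, where K is the number of atoms actually used, hence an algorithm that adapts the dictionary size.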
Bayesian nonparametric subspace estimation
Principal component analysis is a widely used technique to perform dimension reduction. However, selecting a finite number of significant components is essential and remains a crucial issue. Only a few attempts have proposed a probabilistic approach to adaptively select this number. This paper introduces a Bayesian nonparametric model to jointly estimate the principal components and the corresponding intrinsic dimension. More precisely, the observations are projected onto a random orthogonal basis which is assigned a prior distribution defined on the Stiefel manifold. The factor scores are then given an Indian buffet process prior to model the uncertainty related to the number of components. The parameters of interest as well as the nuisance parameters are finally inferred within a fully Bayesian framework via Monte Carlo sampling. The performance of the proposed approach is assessed through experiments conducted on various examples.
@inproceedings{Elvira_IEEE_ICASSP_2017,author={Elvira, Clément and Chainais, Pierre and Dobigeon, Nicolas},booktitle={2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},doi={10.1109/ICASSP.2017.7952556},keywords={Bayes methods;Estimation;Manifolds;Monte Carlo methods;Numerical models;Principal component analysis;Probabilistic logic;Bayesian inference;Indian buffet process;dimension reduction;distribution on the Stiefel manifold},month=mar,pages={2247-2251},title={Bayesian nonparametric subspace estimation},year={2017},hal={hal-01687163},}
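A minimal generative sketch consistent with the abstract above (the notation and exact factorization are ours, not necessarily the paper's):

\[ y_n = P\,(z_n \odot w_n) + e_n, \qquad P \in \mathcal{V}_K(\mathbb{R}^D), \quad z_n \in \{0,1\}^K \sim \mathrm{IBP}, \quad e_n \sim \mathcal{N}(0, \sigma^2 I_D), \]

where $\mathcal{V}_K(\mathbb{R}^D)$ denotes the Stiefel manifold of orthonormal K-frames, the binary variables $z_n$ select the active components, and the intrinsic dimension is read off the number of active components.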
Democratic prior for anti-sparse coding
@inproceedings{elvira2016ssp,author={Elvira, Clément and Chainais, Pierre and Dobigeon, Nicolas},booktitle={2016 IEEE Statistical Signal Processing Workshop (SSP)},title={Democratic prior for anti-sparse coding},year={2016},pages={1-4},doi={10.1109/SSP.2016.7551813},month=jun,hal={hal-01433632},}
National conference papers (6 entries)
Une nouvelle méthode d’accélération pour LASSO par élimination sûre de variables
@inproceedings{tran2022cap,title={{Une nouvelle m{\'e}thode d'acc{\'e}l{\'e}ration pour LASSO par {\'e}limination s{\^u}re de variables}},author={Tran, Thu-Le and Elvira, Cl{\'e}ment and Dang, Hong-Phuong and Herzet, C{\'e}dric},booktitle={{Conf{\'e}rence sur l'Apprentissage automatique}},address={Vannes, France},year={2022},month=jul,keywords={LASSO ; r{\'e}duction de dimensionalit{\'e} ; {\'e}limination de variables ; safe screening},hal={hal-03806044},language={French},}
Screen & Relax: Accélérer la résolution du problème "Elastic-Net" par identification du support de la solution
@inproceedings{guyard2022screen,title={{Screen \& Relax: Acc{\'e}l{\'e}rer la r{\'e}solution du probl{\`e}me Elastic-Net par identification du support de la solution}},author={Guyard, Theo and Herzet, C{\'e}dric and Elvira, Cl{\'e}ment},booktitle={GRETSI 2022-XXVIII{\`e}me Colloque Francophone de Traitement du Signal et des Images},year={2022},hal={hal-03778139},language={French},}
Node-screening pour le problème des moindres carrés avec pénalité L0
@inproceedings{guyard2022node,title={Node-screening pour le probl{\`e}me des moindres carr{\'e}s avec p{\'e}nalit{\'e} L0},author={Guyard, Theo and Herzet, C{\'e}dric and Elvira, Cl{\'e}ment and Arslan, Ay{\c{s}}e Nur},booktitle={GRETSI 2022-XXVIII{\`e}me Colloque Francophone de Traitement du Signal et des Images},year={2022},hal={hal-03784682},language={French},}
Identification de supports en k étapes avec OMP pour les dictionnaires continus
We present new results on k-step support identification guarantees for a greedy algorithm, orthogonal matching pursuit (OMP), with continuous dictionaries. A dictionary is said to be continuous if it is made up of an uncountably infinite number of atoms. We study a family of parametric dictionaries, called CMF (for completely monotone function), for which k-step support identification is always possible when the parameter is one-dimensional, whatever the number and choice of atoms in the support. In higher dimensions, algebraic conditions become necessary and lead us to revisit the classical analyses of the discrete case. Finally, we discuss the implementation of a continuous version of OMP.
@inproceedings{elvira2019gretsi,title={{Identification de supports en k {\'e}tapes avec OMP pour les dictionnaires continus}},author={Elvira, Cl{\'e}ment and Gribonval, R{\'e}mi and Soussen, Charles and Herzet, C{\'e}dric},url={https://hal.inria.fr/hal-02157571},booktitle={{GRETSI 2019 - XXVII{\`e}me Colloque francophone de traitement du signal et des images}},address={Lille, France},pages={1-4},year={2019},month=aug,language={French},hal={hal-02157571},}
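The CMF family mentioned above is built on completely monotone functions. For reference (the definition is standard; the precise way the kernel is constructed from φ is our reading of the abstracts):

\[ \varphi : (0, \infty) \to \mathbb{R} \text{ is completely monotone if } (-1)^n \varphi^{(n)}(t) \ge 0 \ \text{for all } n \ge 0, \, t > 0, \]

and the kernel between two atom parameters then takes the form $\kappa(\theta, \theta') = \varphi(|\theta - \theta'|)$; the choice $\varphi(t) = e^{-t}$ recovers the Laplacian kernel of the exact-recovery example above.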
Vers une méthode d’optimisation non paramétrique pour l’apprentissage de dictionnaire en utilisant Small-Variance Asymptotics pour modèle probabiliste
In probabilistic models, sampling methods and variational approximations are often used for inference. However, these methods scale poorly. An alternative is to relax the probabilistic model into a non-probabilistic formulation and to use a scalable algorithm to solve the associated minimization problem. We propose a so-called small-variance asymptotics (SVA) analysis of the two-parameter Indian buffet process, a Bayesian nonparametric model that automatically learns a dictionary of adapted size. The approach is obtained by letting the variance of the model likelihood tend to zero. The analysis highlights an interplay between Bayesian methods and optimization. The results illustrate the relevance of the proposed method.
@inproceedings{dang2018cap,author={Dang, Hong-Phuong and Elvira, Clément and Chainais, Pierre},title={Vers une méthode d'optimisation non paramétrique pour l'apprentissage de dictionnaire en utilisant Small-Variance Asymptotics pour modèle probabiliste},booktitle={CAP},year={2018},address={Rouen},language={French},}
Une formulation bayésienne du codage antiparcimonieux
For the sake of robustness, an antisparse code spreads the information of a signal uniformly over all the components of its representation. Finding such a code is naturally expressed as a variational problem involving an ℓ∞-type regularization. In this paper, a Bayesian formulation of the problem is proposed, involving a new probability distribution, the democratic distribution, which penalizes large amplitudes. This distribution is chosen as the prior on the representation coefficients, coupled with a Gaussian likelihood. The Bayesian estimators of the representation coefficients can be approximated with a Gibbs sampler. This method scales poorly, however, and a proximal Monte Carlo algorithm has been proposed. We discuss a new way to choose and tune the prior on the nuisance parameters. Two numerical simulations validate the tuning of the hyperparameters and the search for the regularization parameter.
@inproceedings{elvira2017gretsi,author={Elvira, Clément and Chainais, Pierre and Dobigeon, Nicolas},title={Une formulation bayésienne du codage antiparcimonieux},booktitle={Actes du XXVI{\`e}me Colloque GRETSI},year={2017},address={Juan-les-Pins, France},language={French},hal={hal-01691387},}
Technical reports (3 entries)
Supplementary materials: Safe rules for the identification of zeros in the solutions of the SLOPE problem
@techreport{elvira2021slopesupp,title={{Supplementary materials: Safe rules for the identification of zeros in the solutions of the SLOPE problem}},author={Elvira, Clément and Herzet, Cédric},year={2021},month=oct,}
A response to “Fast OSCAR and OWL Regression via Safe Screening Rules” by Bao et al.
@techreport{Elvira2021techreport,author={Elvira, Clément and Herzet, Cédric},note={Technical report},title={A response to ``Fast OSCAR and OWL Regression via Safe Screening Rules'' by Bao et al.},month=oct,year={2021},}
Bayesian nonparametric principal component analysis
Principal component analysis (PCA) is very popular to perform dimension reduction. The selection of the number of significant components is essential but often based on practical heuristics depending on the application. Only a few works have proposed a probabilistic approach able to infer the number of significant components. To this purpose, this paper introduces a Bayesian nonparametric principal component analysis (BNP-PCA). The proposed model projects observations onto a random orthogonal basis which is assigned a prior distribution defined on the Stiefel manifold. The prior on factor scores involves an Indian buffet process to model the uncertainty related to the number of components. The parameters of interest as well as the nuisance parameters are finally inferred within a fully Bayesian framework via Monte Carlo sampling. A study of the (in)consistency of the marginal maximum a posteriori estimator of the latent dimension is carried out. A new estimator of the subspace dimension is proposed. Moreover, for the sake of statistical significance, a Kolmogorov-Smirnov test based on the posterior distribution of the principal components is used to refine this estimate. The behaviour of the algorithm is first studied on various synthetic examples. Finally, the proposed BNP dimension reduction approach is shown to be easily yet efficiently coupled with clustering or latent factor models within a unique framework.
@techreport{elvira2017preprint,title={Bayesian nonparametric principal component analysis},author={Elvira, Cl{\'e}ment and Chainais, Pierre and Dobigeon, Nicolas},note={arXiv preprint arXiv:1709.05667},year={2017},hal={hal-01687236},}
Thesis (1 entry)
Modèles bayésiens pour l’identification de représentations antiparcimonieuses et l’analyse en composantes principales non paramétrique
@phdthesis{elvira2017phdthesis,author={Elvira, Clément},title={Modèles bayésiens pour l'identification de représentations antiparcimonieuses et l'analyse en composantes principales non paramétrique},language={French},year={2017},month=nov,school={Centrale Lille},address={Lille, France},hal={tel-01730077v2},}