@comment{2019.bib}


	
	@misc{Loosli19b,
		title={Non convex combinations in Multiple Kernel Learning},
		author={Gaëlle Loosli},
		journal = {Poster at Women in Machine Learning (WiML) workshop, co-located with NeurIPS 2019},
		year = {2019},
	  abstract={Multiple Kernel Learning is an elegant way of combining several kernels, but it is restricted to a convex linear combination. Taking advantages of theoretical and practical tools provided by works on learning in  Krein spaces, we introduce here non convex combinations of kernels for MKL, solved in  Krein spaces. The advantages are 1/ it can mix distances and similarities directly. 2/ it can handle indefinite kernels 3/ it can be solved at the cost of a couple of SVM using EasyMKL formulation. }
	}


@inproceedings{Loosli19,
  author    = {Loosli, Gaëlle},
  title     = {{TrIK-SVM}: An Alternative Decomposition for Kernel Methods in {Krein} Spaces},
  booktitle = {ESANN - European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning},
  year      = {2019},
  url       = {https://www.elen.ucl.ac.be},
  abstract  = {The proposed work aims at proposing an alternative kernel decomposition in the context of kernel machines with indefinite kernels. The original paper of KSVM (SVM in Krein spaces) uses the eigen-decomposition; our proposition avoids this decomposition. We explain how it can help in designing an algorithm that won't require computing the full kernel matrix. Finally we illustrate the good behavior of the proposed method compared to KSVM.},
}

@inproceedings{Seck19,
  author    = {Seck, Ismaïla and Loosli, Gaëlle and Canu, Stéphane},
  title     = {{L1}-Norm Double Backpropagation Adversarial Defense},
  booktitle = {ESANN - European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning},
  year      = {2019},
  url       = {https://www.elen.ucl.ac.be},
  abstract  = {Adversarial examples are a challenging open problem for deep neural networks. We propose in this paper to add a penalization term that forces the decision function to be flat in some regions of the input space, such that it becomes, at least locally, less sensitive to attacks. Our proposition is theoretically motivated and shows on a first set of carefully conducted experiments that it behaves as expected when used alone, and seems promising when coupled with adversarial training.},
}