2007.bib

@comment{{This file has been generated by bib2bib 1.98}}
@comment{{Command line: bib2bib -ob ./bibs/2007.bib -c year=2007 mesPublis.bib}}
@misc{martin,
  author = {Gaëlle Bonnet and POB father},
  year = 2007,
  title = {Martin {B}onnet},
  url = {./images/martin.jpg}
}
@article{LoosliC07,
  author = {Gaëlle Loosli and
	Stéphane Canu},
  title = {Comments on the ``Core Vector Machines: Fast {SVM} Training on Very
	Large Data Sets''},
  journal = {Journal of Machine Learning Research},
  volume = {8},
  pages = {291--301},
  year = {2007},
  url = {http://www.jmlr.org/papers/v8/loosli07a.html},
  abstract = {In a recently published paper in JMLR, Tsang et al. (2005) present an algorithm for SVM called Core Vector Machines (CVM) and
	illustrate its performances through comparisons with other SVM solvers.
	After reading the CVM paper we were surprised by some of the reported results.
	In order to clarify the matter, we decided to reproduce some of the experiments.
	It turns out that to some extent, our results contradict those reported.
	Reasons for these different behaviors are given through the analysis of the stopping criterion.}
}
@inbook{loosli2007training,
  chapter = {Training invariant support vector machines using selective sampling},
  author = {Loosli, Gaëlle and Canu, Stéphane and Bottou, Léon},
  title = {Large-Scale Kernel Machines},
  pages = {301--320},
  year = {2007},
  publisher = {MIT Press},
  isbn = {978-0-262-02625-3},
  editor = {Léon Bottou and Olivier Chapelle and Dennis DeCoste and Jason Weston},
  abstract = {In this chapter we present the combination of two approaches for training SVMs on very large data sets.
  The first method, from \cite{loosli2005invariances}, proposes a strategy for handling invariances in SVMs. It is based on the well-known idea that small deformations of an example should not change its class. The deformed samples are selected or discarded during learning (selective sampling). The second approach is LASVM, an efficient online algorithm (each training point is seen only once) that also relies on selective sampling.
  We present state-of-the-art results obtained on a handwritten
  digit recognition problem with 8 million points on a single processor.
  This work also demonstrates that online SVMs can effectively
  handle very large databases.}
}
@inproceedings{Loosli2007,
  author = {Loosli, Gaëlle
	and Gasso, Gilles
	and Canu, Stéphane},
  editor = {Liu, Derong
	and Fei, Shumin
	and Hou, Zengguang
	and Zhang, Huaguang
	and Sun, Changyin},
  title = {Regularization Paths for $\nu$-SVM and $\nu$-SVR},
  booktitle = {Advances in Neural Networks -- ISNN 2007: 4th International Symposium on Neural Networks, ISNN 2007, Nanjing, China, June 3-7, 2007, Proceedings, Part III},
  year = {2007},
  publisher = {Springer Berlin Heidelberg},
  address = {Berlin, Heidelberg},
  pages = {486--496},
  isbn = {978-3-540-72395-0},
  doi = {10.1007/978-3-540-72395-0_62},
  url = {https://doi.org/10.1007/978-3-540-72395-0_62},
  abstract = {This paper presents the $\nu$-SVM and the $\nu$-SVR full regularization paths along with a leave-one-out inspired stopping criterion and an efficient implementation.
	In the $\nu$-SVR method, two parameters are provided by the user: the regularization parameter $C$ and $\nu$, which sets the width of the $\epsilon$-tube.
	In the classical $\nu$-SVM method, the parameter $\nu$
	is a lower bound on the fraction of support vectors in the solution.
	Building on previous work, extensions of the regularization paths for SVM and SVR are proposed, which make it possible to compute the solution path automatically by varying $\nu$ or the regularization parameter.}
}