2006.bib
@comment{{This file has been generated by bib2bib 1.98}}
@comment{{Command line: bib2bib -ob ./bibs/2006.bib -c year=2006 mesPublis.bib}}
@article{LoosliLR06,
  author        = {Loosli, Ga{\"e}lle and
                   Lee, Sans{-}Goog and
                   Rakotomamonjy, Alain},
  title         = {Perception d'{\'e}tats affectifs et apprentissage},
  journal       = {Revue d'Intelligence Artificielle},
  volume        = {20},
  number        = {4-5},
  pages         = {553--582},
  year          = {2006},
  doi           = {10.3166/ria.20.553-582},
  internal-note = {NOTE(review): author "Sans-Goog" may be a typo for "Sang-Goo" (Lee) -- verify against DBLP before correcting},
  abstract      = {This article deals with the problem of affective states recognition from physical and physiological wearable sensors.
Given the complex nature of the relationship between available signals and affective states to be detected we propose to use a statistical learning method.
We begin with a discussion about the state of the art in the field of statistical learning algorithms and their application to affective states recognition.
Then a framework is presented to compare different learning algorithms and methodologies.
Using the results of this pre-study, a global architecture is proposed for a real time embedded recognition system.
Instead of directly recognizing the affective states we propose to begin with detecting abrupt changes in the incoming signal
to segment it first and label each segment afterwards.
The interest of the proposed method is demonstrated on two real affective state recognition tasks.}
}
@inproceedings{loosli2006svm,
  author    = {Loosli, Ga{\"e}lle and Canu, St{\'e}phane and Bottou, L{\'e}on},
  title     = {{SVM} et apprentissage des tr{\`e}s grandes bases de donn{\'e}es},
  booktitle = {Conf{\'e}rence francophone sur l'apprentissage automatique, Tregastel, France},
  year      = {2006},
  abstract  = {Le but de ce travail est de montrer qu'il est possible de faire de la discrimination {\`a} l'aide de S{\'e}parateurs {\`a} Vaste Marge (SVM) sur des tr{\`e}s grandes bases de donn{\'e}es (des millions d'exemples, des centaines de caract{\'e}ristiques et une dizaine de classes).
Pour traiter cette masse de donn{\'e}es, nous nous proposons d'utiliser un algorithme "en ligne" o{\`u} les exemples sont pr{\'e}sent{\'e}s les uns apr{\`e}s les autres. Cette approche permet {\`a} la fois une mise {\`a} jour rapide de la solution (qui ne d{\'e}pend que d'un seul exemple {\`a} la fois) et la gestion efficace de la base d'apprentissage (qui n'a pas {\`a} {\^e}tre enti{\`e}rement en m{\'e}moire).
L'application vis{\'e}e est la reconnaissance de caract{\`e}res avec prise en compte des invariances dans les donn{\'e}es. Pour cela, nous adaptons l'algorithme LASVM (une m{\'e}thode en ligne pour les SVM) en nous inspirant de \cite{loosli2005invariances} pour y int{\'e}grer la connaissance {\em a priori} sur l'invariance.}
}
@inproceedings{delorme2006outil,
  author    = {Delorme, Fabien and Loosli, Ga{\"e}lle},
  title     = {Un outil g{\'e}n{\'e}rique pour l'analyse automatique et la visualisation de productions d'apprenants},
  booktitle = {TICE 2006, Technologies de l'Information et de la Communication dans les Enseignements},
  year      = {2006},
  abstract  = {L'analyse automatique de productions d'apprenants est un probl{\`e}me central dans le cadre des EIAH et plus particuli{\`e}rement
des STI (Syst{\`e}mes Tutoriels Intelligents).
Nous proposons dans cet article un outil g{\'e}n{\'e}rique d'analyse assist{\'e}e de productions d'apprenants.
Cet outil doit permettre {\`a} un concepteur d'EIAH d'instancier facilement un module d'{\'e}valuation dans le cadre d'un EIAH
particulier.
L'outil instanci{\'e} permettra alors une analyse automatique de la production de l'apprenant, tout en permettant au tuteur
d'avoir une vision globale du niveau des apprenants participant {\`a} la formation.
L'article pr{\'e}sente en d{\'e}tail la d{\'e}marche mise en {\oe}uvre ainsi que quelques exemples d'utilisation de l'outil.
}
}
@phdthesis{loosli2006methodes,
  author   = {Loosli, Ga{\"e}lle},
  title    = {M{\'e}thodes {\`a} noyaux pour la d{\'e}tection de contexte},
  school   = {INSA de Rouen},
  year     = {2006},
  abstract = {To make applications able to be aware of their context opens a significant number of perspectives in the human-computer interaction.
The large number of studies aiming at determining how to use the context shows the need for context retrieval.
By analyzing the requirements in machine learning for each task related to context detection, we encountered a certain number of limits.
Consequently, most of the work carried out during this thesis is related to machine learning from a general point of view. The goal is to achieve autonomous and enduring learning.
By autonomous, we mean learning which does not require the intervention of a specialist. That implies to use methods that can auto-set and be used online.
By enduring, we refer to a realistic use of the applications, {\em i.e.} real time, therefore fast, online and stable, for a very significant amount of data. Because the SVM give precise results, we focused our work on this method. However SVM are far from fulfilling the requirements of autonomous and enduring learning.
Autonomous learning is not only subjected to the need for efficiency of the solver. It is also limited by the presence of hyper-parameters. In the case of SVM, these hyper-parameters are relatively few. However, only one is enough to make a method dependent on a form of supervision which contradicts either the need for training on line, or the objective of independence with respect to a human intervention.
We studied this problem via the regularization paths. The regularization paths make it possible to know all the solutions of a problem taking into consideration the bias-variance compromise. For the SVM, this compromise is regulated by one of the hyper-parameters and we thus use the regularization path to obtain an automatic adjustment of this hyper-parameter. We did not reach the stage of push-button SVM yet but we show that all the limits of the SVM are not insurmountable. For the possible size of the databases, we implemented the largest SVM to date on only one processor with 80 million points in dimension 784, by using the online method LASVM.}
}
@inproceedings{loosli2006auto,
  author    = {Loosli, Ga{\"e}lle and Canu, St{\'e}phane},
  title     = {Auto-setting the {SVM} hyper-parameters using regularization paths},
  booktitle = {A Workshop for Women in Machine Learning, San Diego, USA},
  year      = {2006}
}