# 2021.bib

@comment{{This file has been generated by bib2bib 1.99}}

@comment{{Command line: bib2bib -ob ./bibs/2021.bib -c year=2021 mesPublis.bib}}

@inproceedings{Seck21a,
  author    = {Seck, Isma{\"\i}la and Loosli, Ga{\"e}lle and Canu, St{\'e}phane},
  title     = {Linear Program Powered Attack},
  booktitle = {{IJCNN} - International Joint Conference on Neural Networks},
  year      = {2021},
  url       = {https://www.ijcnn.org/},
  abstract  = { Finding the exact robust test error is a good way to compare the robustness of neural networks, but it is a difficult task even on small networks and datasets like MNIST.
However, finding reasonable lower upper bounds is possible and can be done using either complete methods or attacks.
On the one hand, complete methods such as Mixed Integer Program (MIP) give exact robust test accuracy but are time-consuming. On the other hand, attacks are usually fast but tend to perform badly against robust network and underestimate the lower bound on the robust test error.
The purpose of this paper is to present a novel attack method that is both fast and gives better lower bounds than previous attacks. This method exploits the algebraic properties of networks with piecewise linear activation functions to partition the input space in such a way that for each subset of that partition, finding the local optimal adversarial example is done by solving a linear program. Moving from one subset to another is done using classical gradient-based attack tools. To evaluate the quality of the produced adversarial examples, we compare our lower bound on the robust test error to the one previously found. The results found are satisfying in the sense that it does better than previous lower bounds on several models and finds adversarial examples that the MIP failed to expose before reaching its time limit.},
}