@inproceedings{dalvi2004adversarial,
title={Adversarial classification},
author={Dalvi, Nilesh and Domingos, Pedro and Sanghai, Sumit and Verma, Deepak and others},
booktitle={Proceedings of the tenth ACM SIGKDD international conference on Knowledge discovery and data mining},
pages={99--108},
year={2004},
organization={ACM}
}
@inproceedings{lowd2005adversarial,
title={Adversarial learning},
author={Lowd, Daniel and Meek, Christopher},
booktitle={Proceedings of the eleventh ACM SIGKDD international conference on Knowledge discovery in data mining},
pages={641--647},
year={2005},
organization={ACM}
}
@inproceedings{barreno2006can,
title={Can machine learning be secure?},
author={Barreno, Marco and Nelson, Blaine and Sears, Russell and Joseph, Anthony D and Tygar, J Doug},
booktitle={Proceedings of the 2006 ACM Symposium on Information, computer and communications security},
pages={16--25},
year={2006},
organization={ACM}
}
@inproceedings{globerson2006nightmare,
title={Nightmare at test time: robust learning by feature deletion},
author={Globerson, Amir and Roweis, Sam},
booktitle={Proceedings of the 23rd international conference on Machine learning},
pages={353--360},
year={2006},
organization={ACM}
}
@inproceedings{kolcz2009feature,
title={Feature weighting for improved classifier robustness},
author={Ko{\l}cz, Aleksander and Teo, Choon Hui},
booktitle={CEAS'09: Sixth Conference on Email and Anti-Spam},
year={2009}
}
@article{barreno2010security,
title={The security of machine learning},
author={Barreno, Marco and Nelson, Blaine and Joseph, Anthony D and Tygar, JD},
journal={Machine Learning},
volume={81},
number={2},
pages={121--148},
year={2010},
publisher={Springer}
}
@article{biggio2010multiple,
title={Multiple classifier systems for robust classifier design in adversarial environments},
author={Biggio, Battista and Fumera, Giorgio and Roli, Fabio},
journal={International Journal of Machine Learning and Cybernetics},
volume={1},
number={1-4},
pages={27--41},
year={2010},
publisher={Springer}
}
@inproceedings{vsrndic2013detection,
title={Detection of malicious {PDF} files based on hierarchical document structure},
author={{\v{S}}rndi{\'c}, Nedim and Laskov, Pavel},
booktitle={Proceedings of the 20th Annual Network \& Distributed System Security Symposium},
pages={1--16},
year={2013},
organization={The Internet Society}
}
@article{silver2016mastering,
title={Mastering the game of {Go} with deep neural networks and tree search},
author={Silver, David and Huang, Aja and Maddison, Chris J and Guez, Arthur and Sifre, Laurent and Van Den Driessche, George and Schrittwieser, Julian and Antonoglou, Ioannis and Panneershelvam, Veda and Lanctot, Marc and others},
journal={Nature},
volume={529},
number={7587},
pages={484--489},
year={2016},
publisher={Nature Publishing Group}
}
@inproceedings{krizhevsky2012imagenet,
title={{ImageNet} classification with deep convolutional neural networks},
author={Krizhevsky, Alex and Sutskever, Ilya and Hinton, Geoffrey E},
booktitle={Advances in Neural Information Processing Systems},
pages={1097--1105},
year={2012}
}
@article{carlini2016defensive,
title={Defensive distillation is not robust to adversarial examples},
author={Carlini, Nicholas and Wagner, David},
journal={arXiv preprint arXiv:1607.04311},
year={2016}
}
@inproceedings{carlini2017towards,
title={Towards evaluating the robustness of neural networks},
author={Carlini, Nicholas and Wagner, David},
booktitle={2017 IEEE Symposium on Security and Privacy (SP)},
pages={39--57},
year={2017},
organization={IEEE}
}
@inproceedings{carlini2017adversarial,
title={Adversarial examples are not easily detected: Bypassing ten detection methods},
author={Carlini, Nicholas and Wagner, David},
booktitle={Proceedings of the 10th ACM Workshop on Artificial Intelligence and Security},
pages={3--14},
year={2017},
organization={ACM}
}
@article{athalye2018obfuscated,
title={Obfuscated gradients give a false sense of security: Circumventing defenses to adversarial examples},
author={Athalye, Anish and Carlini, Nicholas and Wagner, David},
journal={arXiv preprint arXiv:1802.00420},
year={2018}
}
@article{engstrom2018evaluating,
title={Evaluating and understanding the robustness of adversarial logit pairing},
author={Engstrom, Logan and Ilyas, Andrew and Athalye, Anish},
journal={arXiv preprint arXiv:1807.10272},
year={2018}
}
@article{athalye2018robustness,
title={On the Robustness of the CVPR 2018 White-Box Adversarial Example Defenses},
author={Athalye, Anish and Carlini, Nicholas},
journal={arXiv preprint arXiv:1804.03286},
year={2018}
}
@article{mosbach2018logit,
title={Logit Pairing Methods Can Fool Gradient-Based Attacks},
author={Mosbach, Marius and Andriushchenko, Maksym and Trost, Thomas and Hein, Matthias and Klakow, Dietrich},
journal={arXiv preprint arXiv:1810.12042},
year={2018}
}
@article{brendel2017comment,
title={Comment on ``Biologically inspired protection of deep networks from adversarial attacks''},
author={Brendel, Wieland and Bethge, Matthias},
journal={arXiv preprint arXiv:1704.01547},
year={2017}
}
@article{he2018decision,
title={Decision boundary analysis of adversarial examples},
author={He, Warren and Li, Bo and Song, Dawn},
journal={International Conference on Learning Representations},
year={2018}
}
@article{carlini2017magnet,
title={{MagNet} and ``Efficient defenses against adversarial attacks'' are not robust to adversarial examples},
author={Carlini, Nicholas and Wagner, David},
journal={arXiv preprint arXiv:1711.08478},
year={2017}
}
@article{sharma2017breaking,
title={Breaking the {Madry} Defense Model with $L_1$-based Adversarial Examples},
author={Sharma, Yash and Chen, Pin-Yu},
journal={arXiv preprint arXiv:1710.10733},
year={2017}
}
@article{sharma2018bypassing,
title={Bypassing Feature Squeezing by Increasing Adversary Strength},
author={Sharma, Yash and Chen, Pin-Yu},
journal={arXiv preprint arXiv:1803.09868},
year={2018}
}
@inproceedings{lu2018limitation,
title={On the limitation of {MagNet} defense against L1-based adversarial examples},
author={Lu, Pei-Hsuan and Chen, Pin-Yu and Chen, Kang-Cheng and Yu, Chia-Mu},
booktitle={2018 48th Annual IEEE/IFIP International Conference on Dependable Systems and Networks Workshops (DSN-W)},
pages={200--214},
year={2018},
organization={IEEE}
}
@article{lu2018blimitation,
title={On the Limitation of Local Intrinsic Dimensionality for Characterizing the Subspaces of Adversarial Examples},
author={Lu, Pei-Hsuan and Chen, Pin-Yu and Yu, Chia-Mu},
journal={arXiv preprint arXiv:1803.09638},
year={2018}
}
@article{song2018generative,
title={Generative Adversarial Examples},
author={Song, Yang and Shu, Rui and Kushman, Nate and Ermon, Stefano},
journal={arXiv preprint arXiv:1805.07894},
year={2018}
}
@article{szegedy2013intriguing,
title={Intriguing properties of neural networks},
author={Szegedy, Christian and Zaremba, Wojciech and Sutskever, Ilya and Bruna, Joan and Erhan, Dumitru and Goodfellow, Ian and Fergus, Rob},
journal={arXiv preprint arXiv:1312.6199},
year={2013}
}
@inproceedings{biggio2013evasion,
title={Evasion attacks against machine learning at test time},
author={Biggio, Battista and Corona, Igino and Maiorca, Davide and Nelson, Blaine and {\v{S}}rndi{\'c}, Nedim and Laskov, Pavel and Giacinto, Giorgio and Roli, Fabio},
booktitle={Joint European conference on machine learning and knowledge discovery in databases},
pages={387--402},
year={2013},
organization={Springer}
}
@article{madry2017towards,
title={Towards deep learning models resistant to adversarial attacks},
author={Madry, Aleksander and Makelov, Aleksandar and Schmidt, Ludwig and Tsipras, Dimitris and Vladu, Adrian},
journal={arXiv preprint arXiv:1706.06083},
year={2017}
}
@article{goodfellow2014explaining,
title={Explaining and harnessing adversarial examples},
author={Goodfellow, Ian J and Shlens, Jonathon and Szegedy, Christian},
year={2014},
journal={arXiv preprint arXiv:1412.6572}
}
@article{tramer2017ensemble,
title={Ensemble adversarial training: Attacks and defenses},
author={Tram{\`e}r, Florian and Kurakin, Alexey and Papernot, Nicolas and Goodfellow, Ian and Boneh, Dan and McDaniel, Patrick},
journal={arXiv preprint arXiv:1705.07204},
year={2017}
}
@article{qian2018l2,
title={L2-Nonexpansive Neural Networks},
author={Qian, Haifeng and Wegman, Mark N},
journal={arXiv preprint arXiv:1802.07896},
year={2018}
}
@inproceedings{moosavi2016deepfool,
title={Deepfool: a simple and accurate method to fool deep neural networks},
author={Moosavi-Dezfooli, Seyed-Mohsen and Fawzi, Alhussein and Frossard, Pascal},
booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
pages={2574--2582},
year={2016}
}
@inproceedings{chen2017zoo,
title={{ZOO}: Zeroth order optimization based black-box attacks to deep neural networks without training substitute models},
author={Chen, Pin-Yu and Zhang, Huan and Sharma, Yash and Yi, Jinfeng and Hsieh, Cho-Jui},
booktitle={Proceedings of the 10th ACM Workshop on Artificial Intelligence and Security},
pages={15--26},
year={2017}
}
@article{chen2017ead,
title={{EAD}: elastic-net attacks to deep neural networks via adversarial examples},
author={Chen, Pin-Yu and Sharma, Yash and Zhang, Huan and Yi, Jinfeng and Hsieh, Cho-Jui},
journal={arXiv preprint arXiv:1709.04114},
year={2017}
}
@article{uesato2018adversarial,
title={Adversarial risk and the dangers of evaluating against weak attacks},
author={Uesato, Jonathan and O'Donoghue, Brendan and Oord, Aaron van den and Kohli, Pushmeet},
journal={arXiv preprint arXiv:1802.05666},
year={2018}
}
@article{ilyas2018black,
title={Black-box Adversarial Attacks with Limited Queries and Information},
author={Ilyas, Andrew and Engstrom, Logan and Athalye, Anish and Lin, Jessy},
journal = {International Conference on Machine Learning (ICML)},
year = {2018}
}
@article{brendel2017decision,
title={Decision-based adversarial attacks: Reliable attacks against black-box machine learning models},
author={Brendel, Wieland and Rauber, Jonas and Bethge, Matthias},
journal={arXiv preprint arXiv:1712.04248},
year={2017}
}
@article{papernot2016transferability,
title={Transferability in machine learning: from phenomena to black-box attacks using adversarial samples},
author={Papernot, Nicolas and McDaniel, Patrick and Goodfellow, Ian},
journal={arXiv preprint arXiv:1605.07277},
year={2016}
}
@inproceedings{papernot2016limitations,
title={The limitations of deep learning in adversarial settings},
author={Papernot, Nicolas and McDaniel, Patrick and Jha, Somesh and Fredrikson, Matt and Celik, Z Berkay and Swami, Ananthram},
booktitle={Security and Privacy (EuroS\&P), 2016 IEEE European Symposium on},
pages={372--387},
year={2016},
organization={IEEE}
}
@article{liu2016delving,
title={Delving into transferable adversarial examples and black-box attacks},
author={Liu, Yanpei and Chen, Xinyun and Liu, Chang and Song, Dawn},
journal={arXiv preprint arXiv:1611.02770},
year={2016}
}
@article{he2017adversarial,
title={Adversarial example defenses: Ensembles of weak defenses are not strong},
author={He, Warren and Wei, James and Chen, Xinyun and Carlini, Nicholas and Song, Dawn},
journal={arXiv preprint arXiv:1706.04701},
year={2017}
}
@inproceedings{herley2017sok,
title={{SoK}: Science, security and the elusive goal of security as a scientific pursuit},
author={Herley, Cormac and van Oorschot, Paul C},
booktitle={Security and Privacy (SP), 2017 IEEE Symposium on},
pages={99--120},
year={2017},
organization={IEEE}
}
@article{papernot2018cleverhans,
title={Technical Report on the CleverHans v2.1.0 Adversarial Examples Library},
author={Nicolas Papernot and Fartash Faghri and Nicholas Carlini and
Ian Goodfellow and Reuben Feinman and Alexey Kurakin and Cihang Xie and
Yash Sharma and Tom Brown and Aurko Roy and Alexander Matyasko and
Vahid Behzadan and Karen Hambardzumyan and Zhishuai Zhang and
Yi-Lin Juang and Zhi Li and Ryan Sheatsley and Abhibhav Garg and
Jonathan Uesato and Willi Gierke and Yinpeng Dong and David Berthelot and
Paul Hendricks and Jonas Rauber and Rujun Long},
journal={arXiv preprint arXiv:1610.00768},
year={2018}
}
@article{rauber2017foolbox,
title={Foolbox v0.8.0: A {Python} toolbox to benchmark the robustness of machine learning models},
author={Rauber, Jonas and Brendel, Wieland and Bethge, Matthias},
journal={arXiv preprint arXiv:1707.04131},
year={2017}
}
@inproceedings{dahl2013large,
title={Large-scale malware classification using random projections and neural networks},
author={Dahl, George E and Stokes, Jack W and Deng, Li and Yu, Dong},
booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2013 IEEE International Conference on},
pages={3422--3426},
year={2013},
organization={IEEE}
}
@article{tramer2018ad,
title={Ad-versarial: Defeating Perceptual Ad-Blocking},
author={Tram{\`e}r, Florian and Dupr{\'e}, Pascal and Rusak, Gili and Pellegrino, Giancarlo and Boneh, Dan},
journal={arXiv preprint arXiv:1811.03194},
year={2018}
}
@inproceedings{carlini2016hidden,
title={Hidden Voice Commands},
author={Carlini, Nicholas and Mishra, Pratyush and Vaidya, Tavish and Zhang, Yuankai and Sherr, Micah and Shields, Clay and Wagner, David and Zhou, Wenchao},
booktitle={USENIX Security Symposium},
pages={513--530},
year={2016}
}
@article{kolter2017provable,
title={Provable defenses against adversarial examples via the convex outer adversarial polytope},
author={Kolter, J Zico and Wong, Eric},
journal={arXiv preprint arXiv:1711.00851},
year={2017}
}
@article{raghunathan2018certified,
title={Certified defenses against adversarial examples},
author={Raghunathan, Aditi and Steinhardt, Jacob and Liang, Percy},
journal={arXiv preprint arXiv:1801.09344},
year={2018}
}
@article{sinha2018certifying,
title={Certifying some distributional robustness with principled adversarial training},
author={Sinha, Aman and Namkoong, Hongseok and Duchi, John},
journal={International Conference on Learning Representations},
year={2018}
}
@inproceedings{lecuyer2018certified,
title={Certified robustness to adversarial examples with differential privacy},
author={Lecuyer, Mathias and Atlidakis, Vaggelis and Geambasu, Roxana and Hsu, Daniel and Jana, Suman},
booktitle={IEEE Symposium on Security and Privacy (SP)},
year={2018},
organization={IEEE}
}
@inproceedings{katz2017reluplex,
title={Reluplex: An efficient SMT solver for verifying deep neural networks},
author={Katz, Guy and Barrett, Clark and Dill, David L and Julian, Kyle and Kochenderfer, Mykel J},
booktitle={International Conference on Computer Aided Verification},
pages={97--117},
year={2017},
organization={Springer}
}
@inproceedings{elsayed2018adversarial,
title={Adversarial examples that fool both computer vision and time-limited humans},
author={Elsayed, Gamaleldin and Shankar, Shreya and Cheung, Brian and Papernot, Nicolas and Kurakin, Alexey and Goodfellow, Ian and Sohl-Dickstein, Jascha},
booktitle={Advances in Neural Information Processing Systems},
pages={3914--3924},
year={2018}
}
@inproceedings{bhagoji2018practical,
title={Practical Black-Box Attacks on Deep Neural Networks Using Efficient Query Mechanisms},
author={Bhagoji, Arjun Nitin and He, Warren and Li, Bo and Song, Dawn},
booktitle={European Conference on Computer Vision},
pages={158--174},
year={2018},
organization={Springer}
}
@article{gilmer2018motivating,
title={Motivating the rules of the game for adversarial example research},
author={Gilmer, Justin and Adams, Ryan P and Goodfellow, Ian and Andersen, David and Dahl, George E},
journal={arXiv preprint arXiv:1807.06732},
year={2018}
}
@article{engstrom2017rotation,
title={A rotation and a translation suffice: Fooling {CNNs} with simple transformations},
author={Engstrom, Logan and Tran, Brandon and Tsipras, Dimitris and Schmidt, Ludwig and Madry, Aleksander},
journal={arXiv preprint arXiv:1712.02779},
year={2017}
}
@article{kurakin2016adversarial,
title={Adversarial examples in the physical world},
author={Kurakin, Alexey and Goodfellow, Ian and Bengio, Samy},
journal={arXiv preprint arXiv:1607.02533},
year={2016}
}
@article{saltzer1975protection,
title={The protection of information in computer systems},
author={Saltzer, Jerome H and Schroeder, Michael D},
journal={Proceedings of the IEEE},
volume={63},
number={9},
pages={1278--1308},
year={1975},
publisher={IEEE}
}
@article{kerckhoffs1883cryptographic,
title={La cryptographie militaire},
author={Kerckhoffs, Auguste},
journal={Journal des sciences militaires},
pages={5--38},
year={1883}
}
@inproceedings{papernot2017practical,
title={Practical black-box attacks against machine learning},
author={Papernot, Nicolas and McDaniel, Patrick and Goodfellow, Ian and Jha, Somesh and Celik, Z Berkay and Swami, Ananthram},
booktitle={Proceedings of the 2017 ACM on Asia Conference on Computer and Communications Security},
pages={506--519},
year={2017},
organization={ACM}
}
@article{cornelius2019efficacy,
title={The Efficacy of SHIELD under Different Threat Models},
author={Cornelius, Cory},
journal={arXiv preprint arXiv:1902.00541},
year={2019}
}
@article{carlini2019ami,
title={Is AmI (Attacks Meet Interpretability) Robust to Adversarial Examples?},
author={Carlini, Nicholas},
journal={arXiv preprint arXiv:1902.02322},
year={2019}
}
@article{schott2018,
title={Towards the first adversarially robust neural network model on {MNIST}},
author={Schott, Lukas and Rauber, Jonas and Bethge, Matthias and Brendel, Wieland},
journal={International Conference on Learning Representations (ICLR)},
year={2019}
}
@article{ford2019adversarial,
title={Adversarial Examples Are a Natural Consequence of Test Error in Noise},
author={Ford, Nic and Gilmer, Justin and Carlini, Nicholas and Cubuk, Ekin Dogus},
journal={arXiv preprint arXiv:1901.10513},
year={2019}
}
@article{hendrycks2018benchmarking,
title={Benchmarking Neural Network Robustness to Common Corruptions and Perturbations},
author={Hendrycks, Dan and Dietterich, Thomas},
journal={International Conference on Learning Representations (ICLR)},
year={2019}
}
@inproceedings{fawzi2016robustness,
title={Robustness of classifiers: from adversarial to random noise},
author={Fawzi, Alhussein and Moosavi-Dezfooli, Seyed-Mohsen and Frossard, Pascal},
booktitle={Advances in Neural Information Processing Systems},
pages={1632--1640},
year={2016}
}
@inproceedings{jetley2018friends,
title={With friends like these, who needs adversaries?},
author={Jetley, Saumya and Lord, Nicholas and Torr, Philip},
booktitle={Advances in Neural Information Processing Systems},
pages={10772--10782},
year={2018}
}
@inproceedings{TjengXT19,
author={Vincent Tjeng and Kai Xiao and Russ Tedrake},
title={Evaluating Robustness of Neural Networks with Mixed Integer Programming},
booktitle={International Conference on Learning Representations (ICLR)},
year={2019}
}
@article{TuTCLZYHC18,
title={{AutoZOOM}: Autoencoder-based Zeroth Order Optimization Method for Attacking Black-box Neural Networks},
author={Chun-Chen Tu and Paishun Ting and Pin-Yu Chen and Sijia Liu and Huan Zhang and Jinfeng Yi and Cho-Jui Hsieh and Shin-Ming Cheng},
journal={arXiv preprint arXiv:1805.11770},
year={2018}
}
@inproceedings{IlyasEM18,
title={Prior Convictions: Black-box Adversarial Attacks with Bandits and Priors},
author={Ilyas, Andrew and Engstrom, Logan and Madry, Aleksander},
booktitle = {International Conference on Learning Representations (ICLR)},
year={2018}
}
@inproceedings{LiuCLS17,
title={Delving into Transferable Adversarial Examples and Black-box Attacks},
author={Yanpei Liu and Xinyun Chen and Chang Liu and Dawn Song},
booktitle = {International Conference on Learning Representations (ICLR)},
year={2017}
}
@article{GowalDSBQUAMK18,
title={On the Effectiveness of Interval Bound Propagation for Training Verifiably Robust Models},
author={Sven Gowal and Krishnamurthy Dvijotham and Robert Stanforth and Rudy Bunel and Chongli Qin and Jonathan Uesato and Relja Arandjelovic and Timothy Mann and Pushmeet Kohli},
journal={arXiv preprint arXiv:1810.12715},
year={2018}
}
@inproceedings{WengZCSHBDD18,
title={Towards fast computation of certified robustness for {ReLU} networks},
author={Weng, Tsui-Wei and Zhang, Huan and Chen, Hongge and Song, Zhao and Hsieh, Cho-Jui and Boning, Duane and Dhillon, Inderjit S and Daniel, Luca},
booktitle={International Conference on Machine Learning (ICML)},
year={2018}
}
@inproceedings{XiaoTSM19,
author={Kai Y. Xiao and Vincent Tjeng and Nur Muhammad Shafiullah and Aleksander Madry},
title={Training for Faster Adversarial Robustness Verification via Inducing {ReLU} Stability},
booktitle={International Conference on Learning Representations (ICLR)},
year={2019}
}
@inproceedings{AthalyeEIK18,
title={Synthesizing Robust Adversarial Examples},
author={Athalye, Anish and Engstrom, Logan and Ilyas, Andrew and Kwok, Kevin},
booktitle={International Conference on Machine Learning (ICML)},
pages={284--293},
year={2018}
}