{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,29]],"date-time":"2026-04-29T13:03:42Z","timestamp":1777467822255,"version":"3.51.4"},"reference-count":71,"publisher":"IEEE","license":[{"start":{"date-parts":[[2019,5,1]],"date-time":"2019-05-01T00:00:00Z","timestamp":1556668800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,5,1]],"date-time":"2019-05-01T00:00:00Z","timestamp":1556668800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-009"},{"start":{"date-parts":[[2019,5,1]],"date-time":"2019-05-01T00:00:00Z","timestamp":1556668800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-001"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019,5]]},"DOI":"10.1109\/sp.2019.00044","type":"proceedings-article","created":{"date-parts":[[2019,9,16]],"date-time":"2019-09-16T22:03:02Z","timestamp":1568671382000},"page":"656-672","source":"Crossref","is-referenced-by-count":379,"title":["Certified Robustness to Adversarial Examples with Differential Privacy"],"prefix":"10.1109","author":[{"given":"Mathias","family":"Lecuyer","sequence":"first","affiliation":[]},{"given":"Vaggelis","family":"Atlidakis","sequence":"additional","affiliation":[]},{"given":"Roxana","family":"Geambasu","sequence":"additional","affiliation":[]},{"given":"Daniel","family":"Hsu","sequence":"additional","affiliation":[]},{"given":"Suman","family":"Jana","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1145\/3132847.3133031"},{"key":"ref70","article-title":"Wide residual networks","author":"zagoruyko","year":"2016","journal-title":"CoRR"},{"key":"ref39","article-title":"Empirical bernstein bounds and sample-variance penalization","author":"maurer","year":"2009","journal-title":"COLT 2009 The 22nd Conference on Learning Theory"},{"key":"ref38","year":"2017","journal-title":"CIFAR-10 Adversarial Examples Challenge"},{"key":"ref33","author":"krizhevsky","year":"2009","journal-title":"Learning multiple layers of features from tiny images"},{"key":"ref32","article-title":"Delving into adversarial attacks on deep policies","author":"kos","year":"2017","journal-title":"arXiv preprint arXiv 1705 06452"},{"key":"ref31","article-title":"Adversarial examples for generative models","author":"kos","year":"2017","journal-title":"arXiv preprint arXiv 1702 06832"},{"key":"ref30","article-title":"Reluplex: An efficient SMT solver for verifying deep neural networks","author":"katz","year":"2017","journal-title":"CoRR"},{"key":"ref37","article-title":"Towards deep learning models resistant to adversarial attacks","volume":"abs 1706 6083","author":"madry","year":"2017","journal-title":"CoRR"},{"key":"ref36","article-title":"No need to worry about adversarial examples in object detection in autonomous vehicles","author":"lu","year":"2017","journal-title":"CVPR"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.2172\/1346183"},{"key":"ref34","article-title":"Adversarial examples in the physical world","author":"kurakin","year":"2016","journal-title":"arXiv preprint 1607 02533"},{"key":"ref60","article-title":"Stacked denoising autoencoders: Learning useful representations in a deep network with a local denoising criterion","author":"vincent","year":"2010","journal-title":"J Mach Learn Res"},{"key":"ref62","article-title":"Formal security analysis of neural networks using symbolic intervals","author":"wang","year":"2018","journal-title":"Proceedings of the 27th USENIX Security Symposium"},{"key":"ref61","article-title":"Efficient formal safety analysis of neural networks","author":"wang","year":"2018","journal-title":"Advances in Neural Information Processing Systems (NIPS)"},{"key":"ref63","article-title":"Evaluating the robustness of neural networks: An extreme value theory approach","author":"weng","year":"2018","journal-title":"arXiv preprint arXiv 1801 10578"},{"key":"ref28","article-title":"Batch normalization: Accelerating deep network training by reducing internal covariate shift","author":"ioffe","year":"2015","journal-title":"International Conference on Machine Learning"},{"key":"ref64","year":"2017","journal-title":"Operator norm"},{"key":"ref27","article-title":"The robust manifold defense: Adversarial training using generative models","volume":"abs 1712 9196","author":"ilyas","year":"2017","journal-title":"CoRR"},{"key":"ref65","article-title":"Provable defenses against adversarial examples via the convex outer adversarial polytope","author":"wong","year":"2018","journal-title":"International Conference on Machine Learning"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/543"},{"key":"ref29","article-title":"Thermometer encoding: One hot way to resist adversarial examples","author":"buckman","year":"2018","journal-title":"International Conference on Learning Representations"},{"key":"ref67","author":"xiao","year":"2018","journal-title":"Spatially transformed adversarial examples"},{"key":"ref68","article-title":"Pixeldefend: Leveraging generative models to understand and defend against adversarial examples","author":"song","year":"2018","journal-title":"International Conference on Learning Representations"},{"key":"ref69","author":"lecun","year":"2017","journal-title":"The MNIST Database of Handwritten Digits"},{"key":"ref2","author":"athalye","year":"2018","journal-title":"Obfuscated gradients give a false sense of security Circumventing defenses to adversarial examples"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/2976749.2978318"},{"key":"ref20","article-title":"Ai 2: Safety and robustness certification of neural networks with abstract interpretation","author":"gehr","year":"2018","journal-title":"IEEE Symposium on Security and Privacy (SP)"},{"key":"ref22","year":"2018","journal-title":"Inception v3"},{"key":"ref21","article-title":"Explaining and harnessing adversarial examples","author":"goodfellow","year":"2015","journal-title":"Proc 3rd ICLR"},{"key":"ref24","article-title":"Early methods for detecting adversarial images","author":"hendrycks","year":"2017","journal-title":"ICLR (workshop track)"},{"key":"ref23","article-title":"Stochastic activation pruning for robust adversarial defense","author":"dhillon","year":"2018","journal-title":"International Conference on Learning Representations"},{"key":"ref26","article-title":"Safety verification of deep neural networks","author":"huang","year":"2017","journal-title":"Proceedings of the 29th International Conference on Computer Aided Verification"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1080\/01621459.1963.10500830"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1145\/3132747.3132785"},{"key":"ref51","article-title":"Defense-GAN: Protecting classifiers against adversarial attacks using generative models","author":"samangouei","year":"2018","journal-title":"International Conference on Learning Representations"},{"key":"ref59","article-title":"Ensemble adversarial training: Attacks and defenses","volume":"abs 1705 7204","author":"tram\u00e9r","year":"2017","journal-title":"CoRR"},{"key":"ref58","article-title":"Evaluating robustness of neural networks with mixed integer programming","author":"tjeng","year":"2017","journal-title":"arXiv preprint arXiv 1711 07356"},{"key":"ref57","year":"2017","journal-title":"Tensorflow r1 5 Resnet models"},{"key":"ref56","article-title":"Intriguing properties of neural networks","author":"szegedy","year":"2014","journal-title":"Proceedings of the 2nd International Conference on Learning Representations"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.308"},{"key":"ref54","author":"song","year":"2018","journal-title":"Generative adversarial examples"},{"key":"ref53","author":"sinha","year":"2017","journal-title":"Certifying Some Distributional Robustness with Principled Adversarial Training"},{"key":"ref52","article-title":"Certified defenses against adversarial examples","author":"raghunathan","year":"2018","journal-title":"arXiv preprint arXiv 1801 09344"},{"key":"ref10","article-title":"Countering adversarial images using input transformations","author":"guo","year":"2018","journal-title":"International Conference on Learning Representations"},{"key":"ref11","article-title":"Mitigating adversarial effects through randomization","author":"xie","year":"2018","journal-title":"International Conference on Learning Representations"},{"key":"ref40","doi-asserted-by":"crossref","DOI":"10.1145\/1557019.1557090","article-title":"Differentially private recommender systems: Building privacy into the netflix prize contenders","author":"mcsherry","year":"2009","journal-title":"Proceedings of the 15th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining"},{"key":"ref12","article-title":"Parseval networks: Improving robustness to adversarial examples","author":"cisse","year":"2017","journal-title":"Proceedings of the 34th International Conference on Machine Learning"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref14","article-title":"Bayesian Differential Privacy through Posterior Sampling","author":"dimitrakakis","year":"2016","journal-title":"arXiv preprint arXiv 1306 1066v5"},{"key":"ref15","doi-asserted-by":"crossref","DOI":"10.1007\/978-3-319-77935-5_9","article-title":"Output range analysis for deep feedforward neural networks","author":"dutta","year":"2018","journal-title":"Proceedings of the Nasa Formal Methods Symposium"},{"key":"ref16","article-title":"Training verified learners with learned verifiers","author":"dvijotham","year":"2018","journal-title":"ArXiv e-prints"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/1536414.1536466"},{"key":"ref18","article-title":"The algorithmic foundations of differential privacy","author":"dwork","year":"2014","journal-title":"Foundations and Trends\u00ae in Theoretical Computer Science"},{"key":"ref19","article-title":"Robust physical-world attacks on machine learning models","author":"evtimov","year":"2017","journal-title":"arXiv preprint arXiv 1707 08945"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1145\/2897518.2897566"},{"key":"ref3","article-title":"Synthesizing robust adversarial examples","author":"athalye","year":"2017","journal-title":"arXiv preprint arXiv 1707 07397"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/3128572.3140444"},{"key":"ref5","article-title":"End to end learning for self-driving cars","author":"bojarski","year":"2016","journal-title":"CoRR"},{"key":"ref8","doi-asserted-by":"crossref","DOI":"10.1007\/978-3-642-39077-7_5","article-title":"Broadening the scope of differential privacy using metrics","author":"chatzikokolakis","year":"2013","journal-title":"International Symposium on Privacy Enhancing Technologies Symposium"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2017.49"},{"key":"ref49","article-title":"Lower bounds on the robustness to adversarial perturbations","author":"peck","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref9","article-title":"Differentially private empirical risk minimization","author":"chaudhuri","year":"2011","journal-title":"J Mach Learn Res"},{"key":"ref46","article-title":"Towards the science of security and privacy in machine learning","volume":"abs 1611 3814","author":"papernot","year":"2016","journal-title":"CoRR"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2016.41"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178304"},{"key":"ref47","author":"parkhi","year":"0","journal-title":"Deep face recognition"},{"key":"ref42","article-title":"On detecting adversarial perturbations","author":"metzen","year":"2017","journal-title":"Proceedings of the 6th International Conference on Learning Representations"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1145\/3133956.3134057"},{"key":"ref44","author":"netzer","year":"0","journal-title":"Reading digits in natural images with unsupervised feature learning"},{"key":"ref43","article-title":"Differentiable abstract interpretation for provably robust neural networks","author":"mirman","year":"2018","journal-title":"International Conference on Machine Learning (ICML)"}],"event":{"name":"2019 IEEE Symposium on Security and Privacy (SP)","location":"San Francisco, CA, USA","start":{"date-parts":[[2019,5,19]]},"end":{"date-parts":[[2019,5,23]]}},"container-title":["2019 IEEE Symposium on Security and Privacy (SP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8826229\/8835208\/08835364.pdf?arnumber=8835364","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,17]],"date-time":"2022-07-17T17:54:37Z","timestamp":1658080477000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8835364\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,5]]},"references-count":71,"URL":"https:\/\/doi.org\/10.1109\/sp.2019.00044","relation":{},"subject":[],"published":{"date-parts":[[2019,5]]}}}