@article{Oslund_Washington_So_Chen_Ji_2022,
  title={Multiview Robust Adversarial Stickers for Arbitrary Objects in the Physical World},
  volume={1},
  number={4},
  url={https://ojs.bonviewpress.com/index.php/JCCE/article/view/322},
  DOI={10.47852/bonviewJCCE2202322},
  abstractNote={Among different adversarial attacks on deep learning models for image classification, physical attacks have been considered easier to implement without assuming access to victims' devices. In this paper, we propose a practical new pipeline for launching multiview robust physical-world attacks, by creating printable adversarial stickers for arbitrary objects. In particular, a 3D model is used to estimate the camera pose in the photo. Then, by perturbing a part of the 3D model's texture, rendering it, and overlaying the perturbation onto the physical images, realistic training images can be obtained for training a robust adversarial sticker. Experiments with our pipeline show that highly effective adversarial stickers can be generated for many different objects of different sizes and shapes while also achieving a higher attack success rate than attacks that do not utilize camera pose estimation and 3D models. In addition, by using different backgrounds in training and adding randomness to training images, the created stickers continue to function in varied environments. Attacks also remain robust in black-box tests.},
  journal={Journal of Computational and Cognitive Engineering},
  author={Oslund, Scott and Washington, Clayton and So, Andrew and Chen, Tingting and Ji, Hao},
  year={2022},
  month=sep,
  pages={152--158}
}