backup typst
This commit is contained in: parent 5cc9ea1e76, commit 6dff086d6c
4 changed files with 689 additions and 341 deletions
@@ -20,12 +20,12 @@ author = {Zachry Basnight and Jonathan Butts and Juan Lopez and Thomas Dube},
}

@misc{rieck2016attacks,
  title = {Attacks on Fitness Trackers Revisited: A Case-Study of Unfit Firmware Security},
  author = {Jakob Rieck},
  year = {2016},
  eprint = {1604.03313},
  archivePrefix = {arXiv},
  primaryClass = {cs.CR}
}

@inproceedings{185175,

@@ -40,27 +40,27 @@ month = aug,
}

@online{usb_killer,
  author = {Dark Purple},
  title = {USB Killer},
  year = {2021},
  url = {https://kukuruku.co/post/usb-killer/},
  urldate = {2021-12-18}
}

@online{lan_turtle,
  author = {Hak5},
  title = {LAN Turtle},
  year = {2021},
  url = {https://hak5.org/collections/sale/products/lan-turtle},
  urldate = {2021-12-18}
}

@online{rubber_ducky,
  author = {Hak5},
  title = {Rubber Ducky},
  year = {2021},
  url = {https://hak5.org/collections/sale/products/usb-rubber-ducky-deluxe},
  urldate = {2021-12-18}
}

@online{key_croc,

@@ -80,20 +80,18 @@ month = aug,
}

@inproceedings{firmware_blockchain,
  author = {Lim, Jea-Min and Kim, Youngpil and Yoo, Chuck},
  booktitle = {2018 IEEE International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)},
  title = {Chain Veri: Blockchain-Based Firmware Verification System for IoT Environment},
  year = {2018},
  pages = {1050-1056},
  doi = {10.1109/Cybermatics_2018.2018.00194}
}

@inproceedings{firmware_blockchain_2,
  author = {Lee, Boohyung and Malik, Sehrish and Wi, Sarang and Lee, Jong-Hyouk},
  editor = {Lee, Jong-Hyouk and Pack, Sangheon},
  title = {Firmware Verification of Embedded Devices Based on a Blockchain},

@@ -106,8 +104,7 @@ isbn="978-3-319-60717-7"
}

@inproceedings{firmware_data,
  author = {McMinn, Lucille and Butts, Jonathan},
  editor = {Butts, Jonathan and Shenoi, Sujeet},
  title = {A Firmware Verification Tool for Programmable Logic Controllers},

@@ -120,26 +117,20 @@ isbn="978-3-642-35764-0"
}

@inproceedings{firmware_crypto,
  author = {Nilsson, Dennis K. and Sun, Lei and Nakajima, Tatsuo},
  booktitle = {2008 IEEE Globecom Workshops},
  title = {A Framework for Self-Verification of Firmware Updates over the Air in Vehicle ECUs},
  year = {2008},
  pages = {1-5},
  doi = {10.1109/GLOCOMW.2008.ECP.56}
}

@inproceedings{firmware_sign,
  author = {Jeong, Eunseon and Park, Junyoung and Son, Byeonggeun and Kim, Myoungsu and Yim, Kangbin},
  editor = {Barolli, Leonard and Xhafa, Fatos and Javaid, Nadeem and Enokido, Tomoya},
  title = {Study on Signature Verification Process for the Firmware of an Android Platform},
  booktitle = {Innovative Mobile and Internet Services in Ubiquitous Computing},
  year = {2019},
  publisher = {Springer International Publishing},

@@ -150,73 +141,77 @@ isbn="978-3-319-93554-6"

@misc{mitre,
  title = {MITRE ATT&CK® T1542.001 Pre-OS Boot: System Firmware},
  howpublished = {https://attack.mitre.org/versions/v10/techniques/T1542/001/},
  note = {Accessed: 2022-03-31}
}

@misc{capec,
  title = {CAPEC-532: Altered Installed BIOS},
  howpublished = {https://capec.mitre.org/data/definitions/532.html},
  note = {Accessed: 2022-03-31}
}

@misc{coreboot,
  title = {Coreboot. Fast, secure and flexible OpenSource firmware},
  howpublished = {https://www.coreboot.org/},
  note = {Accessed: 2022-03-31}
}

@misc{owrt,
  title = {OpenWrt},
  howpublished = {https://openwrt.org/},
  note = {Accessed: 2022-03-31}
}

@misc{ddwrt,
  title = {DD-WRT},
  howpublished = {https://dd-wrt.com/},
  note = {Accessed: 2022-03-31}
}

@misc{freshtomato,
  title = {FreshTomato},
  howpublished = {https://www.freshtomato.org/},
  note = {Accessed: 2022-03-31}
}

@misc{droneincrease,
  title = {Future of Drone Industry},
  howpublished = {https://www.strategicmarketresearch.com/blogs/drone-industry-future},
  note = {Accessed: 2023-06}
}

@misc{trustanchor,
  title = {Cisco's Trustworthy Technology Datasheet},
  howpublished = {https://www.cisco.com/c/dam/en_us/about/doing_business/trust-center/docs/trustworthy-technologies-datasheet.pdf},
  note = {Accessed: 2022-04-06}
}

@misc{downtime,
  title = {How to Calculate Data Center Downtime},
  howpublished = {https://datacenterfrontier.com/how-calculate-data-center-downtime/},
  note = {Accessed: 2022-04-06}
}

@misc{cryptoreview,
  author = {YongBin Zhou and DengGuo Feng},
  title = {Side-Channel Attacks: Ten Years After Its Publication and the Impacts on Cryptographic Module Security Testing},
  howpublished = {Cryptology ePrint Archive, Report 2005/388},
  year = {2005},
  note = {https://ia.cr/2005/388}
}

@misc{curveattack,
  author = {Roberto M. Avanzi},
  title = {Side Channel Attacks on Implementations of Curve-Based Cryptographic Primitives},
  howpublished = {Cryptology ePrint Archive, Report 2005/017},
  year = {2005},
  note = {https://ia.cr/2005/017}
}

@inproceedings{keyboard,
  author = {Anand, S. Abhishek and Saxena, Nitesh},
  editor = {Grossklags, Jens and Preneel, Bart},
  title = {A Sound for a Sound: Mitigating Acoustic Side Channel Attacks on Password Keystrokes with Active Sounds},

@@ -228,22 +223,15 @@ pages="346--364",
}

@inproceedings{printer,
  author = {Al Faruque, Mohammad Abdullah and Chhetri, Sujit Rokka and Canedo, Arquimedes and Wan, Jiang},
  booktitle = {2016 ACM/IEEE 7th International Conference on Cyber-Physical Systems (ICCPS)},
  title = {Acoustic Side-Channel Attacks on Additive Manufacturing Systems},
  year = {2016},
  pages = {1-10},
  doi = {10.1109/ICCPS.2016.7479068}
}

@inproceedings{iot_anoamly_sca,
  author = {Devin Spatz and Devin Smarra and Igor Ternovskiy},

@@ -261,22 +249,15 @@ URL = {https://doi.org/10.1117/12.2521450}
}

@inproceedings{power-devices,
  author = {Konstantinou, Charalambos and Maniatakos, Michail},
  booktitle = {2015 IEEE International Conference on Smart Grid Communications (SmartGridComm)},
  title = {Impact of firmware modification attacks on power systems field devices},
  year = {2015},
  pages = {283-288},
  doi = {10.1109/SmartGridComm.2015.7436314}
}

@article{plc_firmware,
  title = {Firmware modification attacks on programmable logic controllers},

@@ -301,11 +282,6 @@ keywords = {Industrial control systems, Programmable logic controllers, Firmware

@article{health_review,
  author = {Yaqoob, Tahreem and Abbas, Haider and Atiquzzaman, Mohammed},
  journal = {IEEE Communications Surveys Tutorials},
  title = {Security Vulnerabilities, Attacks, Countermeasures, and Regulations of Networked Medical Devices—A Review},
  year = {2019},
  volume = {21},
  number = {4},
  pages = {3723-3768},
  doi = {10.1109/COMST.2019.2914094}
}

@article{pacemaker,
  author = {Adrian Baranchuk and Bryce Alexander and Debra Campbell and Sohaib Haseeb and Damian Redfearn and Chris Simpson and Ben Glover},
  title = {Pacemaker Cybersecurity},

@@ -327,9 +303,7 @@ year = {2013},
}

@inproceedings{railway,
  author = {B{\"a}ckman, Ronny and Oliver, Ian and Limonta, Gabriela},
  editor="Casimiro, Ant{\'o}nio
  and Ortmeier, Frank
  and Schoitsch, Erwin

@@ -378,22 +352,15 @@ keywords = {Industrial control systems, Programmable logic controllers, Firmware
}

@inproceedings{blockchain1,
  author = {Dhakal, Samip and Jaafar, Fehmi and Zavarsky, Pavol},
  booktitle = {2019 IEEE 19th International Symposium on High Assurance Systems Engineering (HASE)},
  title = {Private Blockchain Network for IoT Device Firmware Integrity Verification and Update},
  year = {2019},
  pages = {164-170},
  doi = {10.1109/HASE.2019.00033}
}

@inproceedings{sca_attack,
  author = {Liu, Yannan and Wei, Lingxiao and Zhou, Zhe and Zhang, Kehuan and Xu, Wenyuan and Xu, Qiang},

@@ -457,32 +424,63 @@ month = aug,
}

@dataset{dataset,
  author = {Anonymous},
  title = {Dataset of bootup power consumption traces for four networking equipments.},
  month = apr,
  year = {2022},
  publisher = {Zenodo},
  doi = {10.5281/zenodo.6419214},
  url = {https://doi.org/10.5281/zenodo.6419214}
}

@book{han2011data,
  title = {Data mining: concepts and techniques},
  author = {Han, Jiawei and Pei, Jian and Kamber, Micheline},
  year = {2011},
  publisher = {Elsevier}
}

@article{zimmering2021generating,
  title = {Generating Artificial Sensor Data for the Comparison of Unsupervised Machine Learning Methods},
  author = {Zimmering, Bernd and Niggemann, Oliver and Hasterok, Constanze and Pfannstiel, Erik and Ramming, Dario and Pfrommer, Julius},
  journal = {Sensors},
  volume = {21},
  number = {7},
  pages = {2397},
  year = {2021},
  publisher = {Multidisciplinary Digital Publishing Institute}
}

@inproceedings{8326960,
  author = {Dey, Vishal and Pudi, Vikramkumar and Chattopadhyay, Anupam and Elovici, Yuval},
  booktitle = {2018 31st International Conference on VLSI Design and 2018 17th International Conference on Embedded Systems (VLSID)},
  title = {Security Vulnerabilities of Unmanned Aerial Vehicles and Countermeasures: An Experimental Study},
  year = {2018},
  pages = {398-403},
  doi = {10.1109/VLSID.2018.97}
}

@inproceedings{8433205,
  author = {Chen, Wenxin and Dong, Yingfei and Duan, Zhenhai},
  booktitle = {2018 IEEE Conference on Communications and Network Security (CNS)},
  title = {Manipulating Drone Dynamic State Estimation to Compromise Navigation},
  year = {2018},
  pages = {1-9},
  doi = {10.1109/CNS.2018.8433205}
}

@article{8556480,
  author = {Bisio, Igor and Garibotto, Chiara and Lavagetto, Fabio and Sciarrone, Andrea and Zappatore, Sandro},
  journal = {IEEE Transactions on Vehicular Technology},
  title = {Blind Detection: Advanced Techniques for WiFi-Based Drone Surveillance},
  year = {2019},
  volume = {68},
  number = {1},
  pages = {938-946},
  doi = {10.1109/TVT.2018.2884767}
}
137 BPV/qrs/images/l3-setup.svg Normal file

@@ -0,0 +1,137 @@
|
|||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<!-- Created with Inkscape (http://www.inkscape.org/) -->
|
||||
|
||||
<svg
|
||||
width="192.0601mm"
|
||||
height="74.389984mm"
|
||||
viewBox="0 0 192.0601 74.389983"
|
||||
version="1.1"
|
||||
id="svg5"
|
||||
inkscape:version="1.2.2 (b0a8486541, 2022-12-01)"
|
||||
sodipodi:docname="l3-setup.svg"
|
||||
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:svg="http://www.w3.org/2000/svg">
|
||||
<sodipodi:namedview
|
||||
id="namedview7"
|
||||
pagecolor="#ffffff"
|
||||
bordercolor="#000000"
|
||||
borderopacity="1"
|
||||
inkscape:showpageshadow="0"
|
||||
inkscape:pageopacity="0"
|
||||
inkscape:pagecheckerboard="0"
|
||||
inkscape:deskcolor="#b5b5b5"
|
||||
inkscape:document-units="mm"
|
||||
showgrid="false"
|
||||
inkscape:zoom="0.80824699"
|
||||
inkscape:cx="332.20043"
|
||||
inkscape:cy="477.57679"
|
||||
inkscape:window-width="1920"
|
||||
inkscape:window-height="1056"
|
||||
inkscape:window-x="1920"
|
||||
inkscape:window-y="0"
|
||||
inkscape:window-maximized="1"
|
||||
inkscape:current-layer="layer1" />
|
||||
<defs
|
||||
id="defs2" />
|
||||
<g
|
||||
inkscape:label="Layer 1"
|
||||
inkscape:groupmode="layer"
|
||||
id="layer1"
|
||||
transform="translate(-5.5078843,-8.7827511)">
|
||||
<path
|
||||
style="fill:none;stroke:#000000;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1"
|
||||
d="M 125.79958,67.680376 V 23.965394 H 72.512916 L 87.624289,59.836806 H 51.034552 L 65.604944,23.965394 H 21.351482"
|
||||
id="path696"
|
||||
sodipodi:nodetypes="ccccccc" />
|
||||
<path
|
||||
style="fill:none;stroke:#000000;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1"
|
||||
d="m 125.79958,23.965394 h 12.17811"
|
||||
id="path794" />
|
||||
<path
|
||||
style="fill:none;stroke:#000000;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1"
|
||||
d="m 125.79958,67.680376 h 12.17811"
|
||||
id="path796" />
|
||||
<rect
|
||||
style="fill:#808080;fill-opacity:1;stroke-width:0.396875;stroke-linecap:square;stroke-dasharray:1.5875, 1.5875;stop-color:#000000"
|
||||
id="rect478"
|
||||
width="59.590294"
|
||||
height="30.365286"
|
||||
x="137.97769"
|
||||
y="8.7827511"
|
||||
rx="3.9252319"
|
||||
ry="3.9252319" />
|
||||
<rect
|
||||
style="fill:#808080;fill-opacity:1;stroke-width:0.396875;stroke-linecap:square;stroke-dasharray:1.5875, 1.5875;stop-color:#000000"
|
||||
id="rect640"
|
||||
width="59.590294"
|
||||
height="30.365286"
|
||||
x="137.97769"
|
||||
y="52.497734"
|
||||
rx="3.9252319"
|
||||
ry="3.9252319" />
|
||||
<path
|
||||
style="fill:#5599ff;stroke:none;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
d="M 11.949391,13.390089 5.5078843,24.378877 H 10.364585 L 8.7743913,32.997957 15.742239,22.644076 H 9.9280383 Z"
|
||||
id="path1569"
|
||||
sodipodi:nodetypes="ccccccc" />
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-weight:bold;font-size:7px;line-height:1.25;font-family:'STIX Two Text';-inkscape-font-specification:'STIX Two Text Bold';text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#ffffff;stroke-width:0.264583"
|
||||
x="167.80084"
|
||||
y="22.061394"
|
||||
id="text1573"><tspan
|
||||
sodipodi:role="line"
|
||||
id="tspan1571"
|
||||
style="text-align:center;text-anchor:middle;fill:#ffffff;stroke-width:0.264583"
|
||||
x="167.80084"
|
||||
y="22.061394">Machine 1</tspan><tspan
|
||||
sodipodi:role="line"
|
||||
style="text-align:center;text-anchor:middle;fill:#ffffff;stroke-width:0.264583"
|
||||
x="167.80084"
|
||||
y="30.811394"
|
||||
id="tspan1631">IDLE</tspan></text>
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-weight:bold;font-size:7px;line-height:1.25;font-family:'STIX Two Text';-inkscape-font-specification:'STIX Two Text Bold';text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#ffffff;stroke-width:0.264583"
|
||||
x="167.79384"
|
||||
y="65.006378"
|
||||
id="text1627"><tspan
|
||||
sodipodi:role="line"
|
||||
id="tspan1625"
|
||||
style="text-align:center;text-anchor:middle;fill:#ffffff;stroke-width:0.264583"
|
||||
x="167.79384"
|
||||
y="65.006378">Machine 2</tspan><tspan
|
||||
sodipodi:role="line"
|
||||
style="text-align:center;text-anchor:middle;fill:#ffffff;stroke-width:0.264583"
|
||||
x="167.79384"
|
||||
y="73.756378"
|
||||
id="tspan1629">to protect</tspan></text>
|
||||
<rect
|
||||
style="fill:#808080;fill-opacity:1;stroke:none;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-dashoffset:0;stop-color:#000000"
|
||||
id="rect1689"
|
||||
width="21.035084"
|
||||
height="12.211226"
|
||||
x="58.857655"
|
||||
y="53.731194"
|
||||
rx="0"
|
||||
ry="0" />
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-weight:bold;font-size:7px;line-height:1.25;font-family:'STIX Two Text';-inkscape-font-specification:'STIX Two Text Bold';text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;stroke-width:0.264583"
|
||||
x="70.697151"
|
||||
y="74.324738"
|
||||
id="text1745"><tspan
|
||||
sodipodi:role="line"
|
||||
id="tspan1743"
|
||||
style="text-align:center;text-anchor:middle;stroke-width:0.264583"
|
||||
x="70.697151"
|
||||
y="74.324738">Power </tspan><tspan
|
||||
sodipodi:role="line"
|
||||
style="text-align:center;text-anchor:middle;stroke-width:0.264583"
|
||||
x="70.697151"
|
||||
y="83.074738"
|
||||
id="tspan1747">Measurement</tspan></text>
|
||||
</g>
|
||||
</svg>
|
||||
|
565 BPV/qrs/main.typ
@@ -2,7 +2,7 @@

#import "template.typ": *
#show: ieee.with(
  title: "Independent Few-shot Firmware Integrity Verification with Side-Channel Power Analysis",
  abstract: [

  ],

@@ -26,86 +26,104 @@
  bibliography-file: "bibli.bib",
)

// #let agd(content) = {
// text(blue, size:15pt)[#sym.star.filled]
// [#footnote[agd: #content]]
// }

// #let cn = text(purple,size:15pt)[ #super[citation needed]]

// #let ps_counter = counter("ps_counter")
// #let ps(title: none, content: none) = block[
// #ps_counter.step()
// *Problem Statement #ps_counter.display() (#title)*:
// #content
// ]

// // Define acronyms
// #let acronyms=(
// "IDS": "Intrusion Detection System",
// "SVM": "Support Vector Machine",
// )
// // use heading 99 to display acronyms
// #show heading.where(level: 99): it => text(weight: "regular", it.body)
// #let refs=state("plop",acronyms)

// // Initialize the expansion list to keep track of the expansions
// // #let exp = ("dummy": 0)
// // #for key in acronyms.keys(){
// // exp.insert(key,0)
// // }

// // function to call an acronym. Expands it on first encounter.
// #let acr(a: none) = {
// // if exp.at(a) == 0{
// let long = acronyms.at(a)
// heading(level: 99)[#long (#a)]
// // exp.insert(a,1)
// //}
// }

#let acronyms = (
  "BPV": "Boot Process Verifier",
  "IDS": "Intrusion Detection System",
  "SVM": "Support Vector Machine",
  "PLC": "Programmable Logic Controllers",
  "DC": "Direct Current",
  "AC": "Alternating Current",
  "APT": "Advanced Persistent Threats",
  "PDU": "Power Distribution Unit",
  "VLAN": "Virtual Local Area Network",
  "VPN": "Virtual Private Network",
  "IQR": "Inter-Quartile Range",
  "IT": "Information Technology",
  "OEM": "Original Equipment Manufacturer",
  "SCA": "Side-Channel Analysis",
  "ROM": "Read Only Memory",
  "AIM": "Anomaly-Infused Model",
  "RFC": "Random Forest Classifier"
)

#show ref: r =>{// Overload the reference definition

  // Grab the term, target of the reference
  let term = if type(r.target) == "label"{
    str(r.target)
  }
  else{
    // I don't know why the target could not be a type label but it is handled
    none
  }

  if term in acronyms{
    // Grab definition of the term
    let definition = acronyms.at(term)
    // Generate the key associated with this term
    let state-key = "acronym-state-" + term
    // Create a state to keep track of the expansion of this acronym
    state(state-key,false).display(seen => {if seen{term}else{[#definition (#term)]}})
    // Update state to true as it has just been defined
    state(state-key, false).update(true)
  }
  else{
    r
  }
}

// add spaces around lists and tables
#show enum: l =>{v(10pt)
  l
  v(5pt)}

#show list: l =>{v(10pt)
  l
  v(5pt)}

#show table: t =>{v(10pt)
  t
  v(5pt)}

#agd[Change title]
= Introduction
The firmware of any embedded system is susceptible to attacks. Since firmware provides many security features, it is always of major interest to attackers.
Every year, a steady number of new vulnerabilities are discovered. Any device that requires firmware, such as computers @185175, @PLC @BASNIGHT201376, or IoT devices @rieck2016attacks, is vulnerable to these attacks.
There are multiple ways to leverage a firmware attack. Reverting firmware to an older version allows an attacker to reopen discovered and documented flaws.
Cancelling an update can ensure that previously deployed attacks remain available. Finally, implementing custom firmware enables full access to the machine.

The issue of malicious firmware is not recent.
The oldest firmware-related vulnerability recorded on #link("cve.mitre.org") dates back to 1999.
Over the years, many solutions have been proposed to mitigate this issue.
The first and most common countermeasure is verifying the integrity of the firmware before applying an update.
The methods used to verify firmware include, but are not limited to, cryptography @firmware_crypto, blockchain technology @firmware_blockchain @firmware_blockchain_2 or direct data comparison @firmware_data. Depending on the complexity, the manufacturer can provide a tag @firmware_sign of the firmware or encrypt it to provide trust that it is genuine.
The integrity verification can also be performed at run-time, as part of the firmware itself or with dedicated hardware @trustanchor.

The above solutions to firmware attacks share the common flaw of being applied to the same machine they are installed on. This allows an attacker to bypass these countermeasures after infecting the machine. An attacker that could avoid triggering a verification, tamper with the verification mechanism, feed forged data to the verification mechanism, or falsify the verification report could render any defense useless. @IDS are subject to a trade-off between having access to relevant and meaningful information and keeping the detection mechanism separated from the target machine. Our solution addresses this trade-off by leveraging side-channel information.

= Contributions
This paper presents a novel solution for firmware verification using side-channel analysis.
Building on the assumption that every security mechanism operating on a host is vulnerable to being bypassed, we propose using the device's power consumption signature during the firmware execution to assess its integrity.
Because of the intrinsic properties of side-channel information, the integrity evaluation does not involve any communication with the host and is based on data that is difficult to forge.
A distance-based outlier detector that uses power traces of a nominal boot-up sequence can learn the expected pattern and detect any variation in a new boot-up sequence.
This novel solution can detect various attacks centred around manipulating firmware.
In addition to its versatility of detection, it is also easily retrofittable to almost any embedded system with @DC input and a consistent boot sequence.
It requires minimal training examples and minor hardware modification in most cases, especially for @DC powered devices.

//% What we propose in this context: detecting firmware manipulations with power consumption
== Paper Organization
// % Reminder of how the paper is organized
We elaborate on the types of attacks that our method aims to mitigate in the threat model (Section @threat) and on the technology we leverage to capture relevant information in Section @sca.
Section~@bpv describes the proposed solution.
Sections~@exp-network,~@exp-drone and~@aim present test cases that illustrate applications and variations of the @BPV.
Finally, Section~@discussion provides more insight into specific aspects of the proposed solution, and Section~@conclusion concludes the paper.

= Overview
There are plenty of attack points on an ordinary machine.
Depending on the machine's vulnerability and the attacker's skill, variable intrusion levels are possible.
A successful firmware attack can remain undetected by common @IDS as the attacker can deceive detection methods at multiple levels.
Moreover, firmware tampering is not necessarily a complex operation.
#agd[this section is weird]

== Threat Model<threat>
The range of attacks that can be performed by tampering with the boot process is extensive. Because the firmware is responsible for the initialization of the components, the low-level communications, and some in-depth security features, executing adversary code in place of the expected firmware is a powerful capability @mitre @capec. If given enough time, information or access, an attacker could take complete control of the machine and pave the way for future @APT.

@@ -114,26 +132,27 @@ A firmware modification is defined as implementing any change in the firmware co

Downgrading the firmware to an older version is an efficient way to render a machine vulnerable to attacks. Unlike writing custom firmware, it requires little information about the machine. All the documentation and resources are easily accessible online from the manufacturer. Even the reopened exploits are likely to be documented, as they are the reason for the firmware upgrade. An attacker would only need to wait for vulnerabilities to be discovered and then revert the firmware to an older version. These properties make a firmware downgrade a powerful first step towards future attacks. Custom firmware may need to be written for more subtle or advanced attacks. This requires more work and information, as firmware code is not open source and is challenging to reverse engineer. Moreover, the firmware is tailored to a specific machine, and it can be difficult for an attacker to test a custom firmware attack. However, if custom firmware can be successfully implemented, almost any attack can be performed. Finally, a firmware upgrade could also be used to open a newly discovered vulnerability.

A complete firmware change is another form of firmware manipulation. The manufacturer's firmware is replaced by another available firmware that supports the same machine. Such alternatives can be found for computers @coreboot, routers @owrt @ddwrt @freshtomato, but also video game consoles or various embedded machines.
These alternative firmware images are often open source and provide more features, capabilities and performance, as they are frequently updated and optimized by their community. Implementing alternative firmware on a machine could allow an attacker to gain control of it without necessarily alerting the end user.

The last firmware manipulation is to write custom firmware for a specific machine. This is generally very hard to perform, as the firmware is specific to the machine it runs on. Writing custom firmware allows unlimited access to the machine at the cost of a more complex attack.

== Side Channel Analysis<sca>
@SCA leverages the emissions of a system to gain information about its operations. Side channels are defined as any involuntary emission from a system. Historically, the main side channels are sound, power consumption or electromagnetic fields. Side channels are most commonly used in the context of attacks: the machine state is leveraged to extract critical information, allowing powerful and difficult-to-mitigate attacks. @SCA attacks are commonly applied to cryptography @cryptoreview @curveattack, keyboard typing @keyboard, printers @printer and many more. Side-channel attacks can be easy to implement depending on the chosen channel. Power consumption is a reliable source of information but requires physical access to the machine. Sound and electromagnetic fields can be measured from a distance but are also typically more sensitive to the measurement location @iot_anoamly_sca.

Electrical power consumption is especially appropriate for side-channel analysis for many reasons. First, it is easy to measure in a reproducible manner. Second, the relevant power cables can often be accessed with little tampering with the machine when the @AC to @DC power conversion is performed outside the machine. It is also a side channel common to all embedded systems, as they all consume electricity. Finally, it is hard to fake from the developer's point of view. Because of the multiple abstraction layers between the code of a program and its implementation at the hardware level, any change in the code will likely result in a different power consumption pattern (see @fig-boot-up). This is especially true when considering firmware or machines with low computation capabilities or highly specialized devices that have deterministic and stable execution patterns at boot-up.

== Related Work
Historically, firmware was written to a @ROM and was impossible to change. With the growing complexity of embedded systems, manufacturers developed procedures to allow remote firmware upgrades. Firmware upgrades can address performance or security flaws or, less frequently, add features. Unfortunately, these firmware upgrade mechanisms can also be leveraged by an attacker to implement unauthorized or malicious pieces of software in the machine. Almost all embedded systems are vulnerable to firmware attacks. In industrial applications, targets include control systems such as power systems field devices @power-devices, @PLC @plc_firmware, and other industrial embedded systems @santamarta2012here. Safety-critical environments are also prime targets, including medical devices @health_review @pacemaker @medical_case_study, railway systems @railway or automotive systems @cars.

Different security mechanisms have been implemented by manufacturers to guarantee the integrity of the firmware. The first and most common protection is code signing. The firmware code is cryptographically signed, or a checksum is computed. This software signature is provided by the manufacturer and is checked against the signature of the installed firmware. This method suffers from many possible bypasses. First, the firmware can be modified at the manufacturer level @BASNIGHT201376, generating a trusted signature of the modified firmware. Second, the verification can be bypassed @9065145. Finally, the result of the test can be forged to report a valid firmware, even with dedicated hardware @thrangrycats. Blockchain technology is also considered for guaranteeing firmware integrity @blockchain1. Blockchain is a cryptographic chain of trust where each link is integrated in the next to guarantee that the information in the chain has not been modified. This technology could provide software integrity verification at each point where a supply chain attack is possible. However, the blockchain still needs to be verified at some point, and this verification can still be bypassed or forged. A complementary approach to software verification is to leverage side-channel information produced by the machine at runtime.

Historically, @SCA in general, and power analysis in particular, has mainly been used by attackers @sca_attack. Power consumption generally leaks execution information about the running software that can be leveraged to perform various attacks. However, defense is also a promising application for this technology, with runtime anomaly detection @timing or specific attack detection @DTU. Notably, Clark et al. @wud proposed in 2013 a power consumption-based malware detector for medical devices. These defense mechanisms are powerful for protecting systems that cannot host defense software. Unfortunately, common methods usually rely on large amounts of training data and neural network algorithms to perform detection. These models can yield excellent classification performance at the cost of expensive data collection and the need for anomalous trace examples.

#agd[add a paragraph about forging consumption (Maya obfuscation) and data augmentation]

= Boot Process Verification<bpv>
Verifying the firmware of a machine using its power consumption is a time series classification problem, described in the following problem statement:

#ps(title: "Boot Process Verification")[

@@ -146,232 +165,384 @@ The number of training time series $N$ is considered small relative to the usual

All time series considered in this problem ($T union u$) are of length $L$ and synchronized at capture time; see Section @sds for more details about the synchronization process.

== Detection Models<detector>
// The \gls{bpv} is responsible for detecting anomalies in a boot sequence power trace. The training phase is based on a few normal traces that are leveraged to train two types of distance-based detectors. After training, any new boot-up trace can be evaluated to verify the integrity of the firmware. An overview of the \gls{bpv} is presented in @fig-overview #footnote[The source code for the BPV is made available upon request].

// #figure(
//   image("images/schematic.svg", width: 90%),
//   caption: [Overview of the \gls{bpv} model training and evaluation.],
// )<fig-overview>
// #agd["Update figure to remove anomaly generation"]
#figure(
  image("images/schematic.svg", width: 90%),
  caption: [Overview of the @BPV model training and evaluation.],
)<fig-overview>
#agd["Update figure to remove anomaly generation"]

The @BPV performs classification of the boot traces using a distance-based detector and a threshold.
The core of the detection is the computation of the distance between the new trace $u$ and the training traces $T$.
If this distance is greater than the pre-computed threshold, then the @BPV classifies the new trace as anomalous.
Otherwise, the new trace is considered nominal.
@fig-overview presents an overview of the model's data flow.

The training phase consists of computing the threshold based on the known-good training traces.
Two main specificities of this problem make the computation of the threshold difficult.
First, the training dataset only contains nominal traces.
This assumption is important, as there is a nearly infinite number of ways in which a boot sequence can be altered to create a malicious or malfunctioning device.
The @BPV aims at fingerprinting the nominal sequence, not recognizing the possible abnormal sequences.
Thus, the model can only describe the nominal traces statistically, based on the available examples, and assume that outliers to this statistical model correspond to abnormal boot sequences.

Second, the number of training samples is small.
In this case, small is relative to the usual number of training samples leveraged for time series classification #cn.
We assume that the training dataset contains between ten and 100 samples.
This assumption is important for realism.
To keep the detector non-disruptive, the nominal boot sequences are captured during normal operation of the device.
However, the boot of a machine is a rare event, and thus the training dataset must remain small.

The training sequence of the @BPV computes the distance threshold based on a statistical description of the distribution of distances between pairs of normal traces.
The training sequence follows two steps.
+ The sequence computes the distance between all pairs of training traces $D = {d(t_i,t_j) forall i,j in [1,...,N]^2; i eq.not j }$.
+ The sequence computes the threshold as $"thresh" = 1.5 dot "IQR"(D)$ with IQR the Inter-Quartile Range of the set of distances $D$.

The @IQR is a measure of the dispersion of samples.
It is based on the first and third quartiles and defined as $"IQR" = Q_3 - Q_1$ with $Q_3$ the third quartile and $Q_1$ the first quartile.
This value is commonly used @han2011data to detect outliers as a similar but more robust alternative to the $3 sigma$ interval of a Gaussian distribution.
To apply the @IQR to the time series, we first compute the average of the nominal traces.
This average serves as a reference for computing the distance of each trace.
The Euclidean distance is computed between each trace and the reference, and the @IQR of these distances is computed.
The distance threshold takes the value $1.5 dot "IQR"$. For the detection, the distance of each new trace to the reference is computed and compared to the threshold.
If the distance is above the threshold, the new trace is considered anomalous.
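
For clarity, the listing below gives a minimal Python/NumPy sketch of this training and detection logic. It follows the reference-trace description above (mean of the nominal traces, Euclidean distance, threshold of $1.5 dot "IQR"$) and assumes equal-length, synchronized traces stacked in an array; it is an illustration, not the exact @BPV implementation.

```python
import numpy as np

def train_bpv(traces):
    """traces: (N, L) array of nominal boot-up power traces."""
    reference = traces.mean(axis=0)                     # average nominal trace
    dists = np.linalg.norm(traces - reference, axis=1)  # Euclidean distance to the reference
    q1, q3 = np.percentile(dists, [25, 75])
    threshold = 1.5 * (q3 - q1)                         # 1.5 * IQR of the training distances
    return reference, threshold

def is_anomalous(trace, reference, threshold):
    """Flag a new boot-up trace as anomalous when it lies too far from the reference."""
    return np.linalg.norm(trace - reference) > threshold
```

In this sketch, `train_bpv` returns the reference trace and the distance threshold, and a new trace is accepted as nominal when `is_anomalous` returns `False`.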

=== Support For Multi-modal Boot-up Sequences

#agd[Add image from drone with multiple modes]
Some machines can boot following multiple different boot-up sequences that are considered normal.
There can be various reasons for such behavior.
For example, a machine can perform recovery operations if the power was interrupted while the machine was off, or perform health checks on components that may pass or fail and trigger deeper inspection procedures.
Because the machines are treated as black boxes, it is important for the @BPV to deal with these multiple modes during training.
Our approach is to develop one model per mode, following the same procedure as for a single mode, presented in Section @detector.
Then, the detection logic evolves to consider the new trace nominal if it matches any of the models.
If the new trace does not match any model, then it does not follow any of the nominal modes and is considered abnormal.
@fig-modes illustrates the trained @BPV models when two modes are present in the boot-up sequence.

#figure(
  image("images/training.svg", width:100%),
  caption: [BPV model trained with two modes.]
)<fig-modes>
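
The multi-modal variant can be sketched as a thin wrapper around the single-mode model, reusing `train_bpv` and `is_anomalous` from the previous listing: one (reference, threshold) pair is trained per nominal mode, and a new trace is accepted if at least one model accepts it. The per-mode grouping of the training traces is assumed to be given; this is again an illustration rather than the exact implementation.

```python
def train_multimode_bpv(traces_by_mode):
    """traces_by_mode: list of (N_k, L) arrays, one array per nominal boot mode."""
    return [train_bpv(mode_traces) for mode_traces in traces_by_mode]

def is_anomalous_multimode(trace, models):
    """A trace is anomalous only if it matches none of the nominal modes."""
    return all(is_anomalous(trace, ref, thr) for ref, thr in models)
```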
|
||||
|
||||
= Test Case 1: Network Devices<exp-network>
|
||||
|
||||
To verify the performance of the proposed detector, we design an experiment that aims at detecting firmware modifications on different devices .
|
||||
Networking devices are a vital component of any IT organization, from individual houses to complete data centers @downtime.
|
||||
A network failure can result in significant downtime that is extremely expensive to data centers.
|
||||
Compromised network devices can also result in data breaches.
|
||||
Networking devices are a vital component of any organization, from individual houses to complete data centers @downtime.
|
||||
A network failure can result in significant downtime that is extremely expensive for data centers.
|
||||
Compromised network devices can also result in data breaches and @APT.
|
||||
These devices are generally highly specialized in processing and transmitting information as fast as possible.
|
||||
We consider four machines that represent consumer-available products for different prices and performance range.
|
||||
We consider four machines that represent consumer-available products for different prices and performance ranges.
|
||||
|
||||
- Asus Router RT-N12 D1. This router is a low-end product that provides switch, router and wireless access point capabilities for home usage.
|
||||
- Linksys Router MR8300 v1.1. This router is a mid-range product that offers the same capabilities as the Asus router with better performance at a higher price.
|
||||
- TP-Link Switch T1500G-10PS. This 8-port switch offers some security features for low-load usage.
|
||||
- HP Switch Procurve 2650 J4899B. This product is enterprise-oriented and provides more performance than the TP-Link switch. This is the only product of the selection that required hardware modification, as the power supply is internal to the machine. The modification consists of cutting the 5V cables to connect the capture system.
|
||||
|
||||
|
||||
None of the selected devices supports the installation of host-based @IDS or firmware integrity verification.
|
||||
The firmware is verified only during updates with a proprietary mechanism.
|
||||
This experiment illustrates the firmware verification capability of a side-channel @IDS for these machines where common @IDS may not be applicable.
|
||||
|
||||
//% \spabs{The text above connects with the listing of devices, but the text below is not connected and was written to be at the start and refers to the experiment we must have explained above as "this study"}
|
||||
//% \spabs{Suggested to just continue the previous text and explain that we are trying to perform an experiment that can do that (firmware verif) for the mentioned hardware that lacks it, and for that bla bla... we use a capture box.......}
|
||||
== Experimental Setup<setup>
|
||||
Although this experiment is conducted in a controlled environment, the setup is representative of a real deployment.
|
||||
We use a hardware device referred to as the capture box @hidden placed in series with the primary power cable of the target device.
|
||||
The capture box's shunt resistor generates a voltage drop representative of the global power consumption of the machine.
|
||||
This voltage drop value is recorded at a sampling rate of 10 kSPS.
|
||||
These samples are packaged in small fixed-size chunks and sent to a data aggregation server on a private @VLAN.
|
||||
The data aggregation server is responsible for gathering data from all of our capture boxes and sending it via a @VPN tunnel to a storage server as 10 s time-series files.
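For illustration only, the sketch below shows how the sampled voltage values could be grouped into 10 s time-series files on the aggregation side; the CSV layout and file naming are assumptions, not the actual capture-box protocol.

```python
SAMPLING_RATE = 10_000                      # 10 kSPS
SAMPLES_PER_FILE = SAMPLING_RATE * 10       # one file per 10 s of data

def write_time_series_files(sample_stream, out_prefix="capture"):
    """Accumulate streamed voltage samples and flush one file per 10 s window."""
    buffer, index = [], 0
    for sample in sample_stream:
        buffer.append(sample)
        if len(buffer) == SAMPLES_PER_FILE:
            with open(f"{out_prefix}_{index:06d}.csv", "w") as f:
                f.write("\n".join(str(value) for value in buffer))
            buffer, index = [], index + 1
```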
|
||||
|
||||
|
||||
|
||||
|
||||
We gather data from the four networking devices, which are connected to a managed @PDU.
|
||||
The @PDU's outlets can be controlled by sending instructions over a telnet interface, which enables turning each machine on and off automatically.
|
||||
Each machine undergoes a firmware or version change to represent a firmware attack.
|
||||
The changes are listed in @tab-machines.
|
||||
|
||||
#figure(
|
||||
table(
|
||||
columns: (auto,auto,auto,auto),
|
||||
align: horizon,
|
||||
[*Equipment*], [*Original \ Firmware*], [*Modification 1*], [*Modification 2*],
|
||||
|
||||
[TP-Link\ Switch], [20200805], [20200109], align(center, [X]),
|
||||
[HP Procurve\ Switch], [H.10.119], [H.10.117], align(center, [X]),
|
||||
[Asus Router], [Latest OEM], [OpenWrt\ v21.02.2], [OpenWrt\ v21.02.0],
|
||||
[Linksys\ Router], [Latest OEM], [OpenWrt\ v21.02.2], [OpenWrt\ v21.02.0]
|
||||
),
|
||||
caption: [Machines used for the experiments and their modifications.],
|
||||
|
||||
)<tab-machines>
|
||||
|
||||
|
||||
This experiment aims at simulating an attack situation by performing firmware modifications on the target devices and recording the boot-up power trace data for each version.
|
||||
For the switches, we flash different firmware versions provided by the @OEM.
|
||||
For wireless routers, their firmware is changed from the @OEM to different versions of #link("https://openwrt.org/")[OpenWrt].
|
||||
In this study, we consider the latest @OEM firmware version to be the nominal version, expected to be installed on the machine by default.
|
||||
Any other version or firmware represents an attack and is considered anomalous.
|
||||
|
||||
== Experiment Procedure
|
||||
|
||||
To account for randomness and gather representative boot-up sequences of the device, we performed 500 boot iterations for each machine.
|
||||
This cannot reasonably be performed manually with consistency.
|
||||
Therefore, an automation script controls the @PDU with precise timings to perform the boots without human intervention.
|
||||
|
||||
|
||||
The exact experimental procedure followed for each target has minor variations depending on the target's boot-up requirements and timings.
|
||||
Overall, they all follow the same template:
|
||||
|
||||
|
||||
+ Turn ON the power to the machine.
|
||||
+ Wait for a predetermined time for the target to boot up completely.
|
||||
+ Turn OFF the power to the machine and wait for a few seconds to ensure proper shutdown of the machine.
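A simplified sketch of the automation script is shown below; the @PDU address, command syntax, and timings are placeholders, since the exact command set is vendor-specific.

```python
import socket
import time

PDU_HOST = "192.0.2.10"    # placeholder address of the managed PDU
PDU_PORT = 23              # telnet port
BOOT_WAIT_S = 120          # target-specific boot-up duration (placeholder)
SHUTDOWN_WAIT_S = 10       # settle time after power off (placeholder)
ITERATIONS = 500

def send_pdu_command(command):
    """Send one vendor-specific command line to the PDU's telnet interface."""
    with socket.create_connection((PDU_HOST, PDU_PORT), timeout=5) as conn:
        conn.sendall(command.encode() + b"\r\n")

for _ in range(ITERATIONS):
    send_pdu_command("outlet 1 on")     # Step 1: power the machine on (placeholder syntax)
    time.sleep(BOOT_WAIT_S)             # Step 2: wait for a complete boot-up
    send_pdu_command("outlet 1 off")    # Step 3: power off ...
    time.sleep(SHUTDOWN_WAIT_S)         # ... and let the machine shut down properly
```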
|
||||
|
||||
== Results<results>
|
||||
|
||||
|
||||
|
||||
We obtain the results per machine and per model.
|
||||
The training dataset is generated by injecting artificial anomalies, but the evaluation is performed on actual anomalous traces collected in a controlled environment.
|
||||
For each evaluation, a random set of $10$ consecutive traces is selected from the NORMAL label to serve as the seed for the anomaly generation.
|
||||
The anomaly generator returns a training dataset composed of normal traces on one side and anomalous artificial traces on the other.
|
||||
The models train using this dataset and are evaluated against a balanced dataset combining $M in [20,50]$ consecutive anomalous traces selected at random across all abnormal classes and as many nominal traces.
|
||||
The testing set is balanced between nominal and abnormal traces.
|
||||
The training requires only a few nominal traces.
|
||||
This evaluation is repeated $50$ times, and the $F_1$ score is computed for each iteration.
|
||||
The final score is the average of these $F_1$ scores.
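A rough sketch of this evaluation loop is given below; `train_model` and `predict` are hypothetical wrappers around the detector, and the selection of the nominal test traces is simplified compared to the actual protocol.

```python
import numpy as np
from sklearn.metrics import f1_score

def average_f1(normal, anomalous, train_model, predict, runs=50, rng=None):
    """Repeat the train/evaluate protocol and average the F1 scores."""
    rng = rng or np.random.default_rng()
    scores = []
    for _ in range(runs):
        # Seed the anomaly generator with 10 consecutive nominal traces.
        start = int(rng.integers(0, len(normal) - 10))
        model = train_model(normal[start:start + 10])
        # Balanced test set: M in [20, 50] consecutive anomalous traces and as many nominal ones.
        m = int(rng.integers(20, 51))
        a_start = int(rng.integers(0, len(anomalous) - m))
        test = np.concatenate([anomalous[a_start:a_start + m], normal[:m]])
        labels = np.concatenate([np.ones(m), np.zeros(m)])       # 1 = anomalous
        scores.append(f1_score(labels, predict(model, test)))
    return float(np.mean(scores))
```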
|
||||
The results are presented in @tab-results.
|
||||
|
||||
#figure(
|
||||
table(
|
||||
|
||||
columns: 2,
|
||||
[*Machine*], [*BPV*],
|
||||
[TP-Link switch], [0.866],
|
||||
[HP switch], [0.983],
|
||||
[Asus router], [1],
|
||||
[Linksys router], [0.921]
|
||||
),
|
||||
caption: [Results of detection.]
|
||||
|
||||
)<tab-results>
|
||||
|
||||
|
||||
Several hyper-parameters must be tuned to obtain the best performance.
|
||||
First, the length of the trace considered is essential.
|
||||
The trace needs to cover the whole boot-up sequence to be sure to detect any possible change.
|
||||
It is better to avoid extending the trace too much after the firmware sequence is done, as the typical operation of the machine can produce noisy power consumption that interferes with the optimal placement of the threshold.
|
||||
Second, the number of training traces can be optimized.
|
||||
A minimum of four traces is required for the @IQR method based on quartiles.
|
||||
A minimum of two traces is necessary for the @SVM Threshold method, as anomalous traces need to be generated based on the average and standard deviation of the normal dataset.
|
||||
Collecting additional traces after these lower boundaries offers marginal performance improvements as the number of traces has little impact on the threshold placement of both models.
|
||||
Moreover, collecting many boot-up sequences can be difficult to achieve in practice.
|
||||
Finally, tuning the sampling rate is important to ensure the best performance.
|
||||
A machine booting up in two seconds will require a higher sampling rate than a machine booting in thirty seconds.
|
||||
All these parameters are machine-specific and need manual tuning before deployment of the side-channel @IDS.
|
||||
|
||||
|
||||
= Test Case 2: Drone<exp-drone>
|
||||
|
||||
In this case study, we demonstrate the potential of physics-based @IDS for drones.
|
||||
Drones are not new, but their usage in both the consumer and professional sectors has increased in recent years @droneincrease.
|
||||
The core component of consumer-available drones is usually a microcontroller, also called a flight controller.
|
||||
As with any other microcontroller, the flight controller of a drone and its main program (which we call firmware in this paper) are subject to updates and attacks @8326960 @8433205.
|
||||
Some of these attacks leverage firmware manipulations @8556480.
|
||||
With custom firmware uploaded to a drone, many attack possibilities become accessible to the attacker, such as geofencing an area, recovering video feed, or damaging the drone.
|
||||
Moreover, flight controllers are specialized devices that usually do not support the installation of third-party security software, nor do they provide advanced security features such as cryptographic verification of the firmware.
|
||||
With drone usage soaring and the lack of security solutions, the problem of verifying their firmware against anomalies becomes important.
|
||||
|
||||
== Experimental Setup
|
||||
|
||||
|
||||
The experimental setup for this case study is similar to the one presented in @exp-network.
|
||||
The experiment focuses on the Spiri Mu drone #footnote[#link("https://spirirobotics.com/products/spiri-mu/")] flashed with the PX4 Drone Autopilot firmware #footnote[#link("https://px4.io/")].
|
||||
The firmware for the flight controller consists of a microprocessor-specific bootloader, a second-stage bootloader common to all supported flight controllers, and the OS part of the firmware.
|
||||
|
||||
|
||||
The battery of the drone is disconnected to ensure reproducible results and replaced with a laboratory power supply.
|
||||
The power consumption measurement device is attached in series with the main power cable that supplies 11 V @DC to the drone.
|
||||
A controllable relay is placed in series with the main cable to enable scripted bootup and shutdown scenarios.
|
||||
The experiment scenarios are:
|
||||
|
||||
|
||||
|
||||
|
||||
- *Nominal*: The first two versions consisted of unmodified firmware provided by the PX4 project: the first one was a pre-compiled version, and the second one was locally compiled. Although both versions should be identical, some differences appeared in their consumption patterns and required the training of a dual-mode model.
|
||||
- *Low Battery*: When the drone starts with a low battery level, its behavior changes to signal the issue to the user. Any battery level below 11V is considered low. In this scenario, a nominal firmware is loaded, and the drone starts with 10V, triggering the low-battery behavior.
|
||||
- *Malfunctioning Firmware*: Two malfunctioning firmware versions were compiled. The first introduces a _division-by-zero_ bug in the second-stage bootloader. The second introduces the same bug but in the battery management module (in the OS part of the firmware). The second scenario should not introduce measurable anomalous patterns in the boot-up sequence as it only affects the OS stage. #agd[Explain that we add this scenario for sanity check].
|
||||
|
||||
#figure(
|
||||
image("images/Bootup_traces_TPLINK.svg", width: 80%),
|
||||
image("images/drone-overlaps.svg", width: 100%),
|
||||
caption: [Overlap of bootup traces for different scenarios and their average. Green = Low Battery (8 traces + average), Purple = Battery Module Bug (8 traces + average), Orange = Bootloader Bug (6 traces + average).]
|
||||
)
|
||||
|
||||
== Results
|
||||
The experiment procedure consists of starting the drone flight controller multiple times while capturing the power consumption.
|
||||
Each scenario is repeated between 40 and 100 times.
|
||||
The experiment procedure automatically captures boot-up traces for better reproducibility (see @sds for more details).
|
||||
|
||||
@drone-results presents the results of the detection.
|
||||
Both Original and Compiled represent nominal firmware versions.
|
||||
|
||||
#figure(
|
||||
table(
|
||||
columns: (40%,20%,40%),
|
||||
[*Scenario*],[*Accuracy*], [*Nbr. of Samples*],
|
||||
[Original],[1],[98],
|
||||
[Compiled],[1],[49],
|
||||
[Low Battery],[1],[44],
|
||||
[Bootloader Bug],[1],[50],
|
||||
[Battery Module Bug], [0.082],[39],
|
||||
),
|
||||
caption: [Results of the intrusion detection on the drone.]
|
||||
)<drone-results>
|
||||
|
||||
Each scenario introduces disturbances in the boot-up sequence power consumption.
|
||||
The model correctly identifies the anomalous firmware.
|
||||
One interesting scenario is the Battery Module Bug, which is mostly detected as nominal.
|
||||
This result is expected as the bug affects the operation of the firmware after the boot-up sequence.
|
||||
Hence, the power consumption in the first second of activity remains nominal.
|
||||
#agd[Should the result of the battery module bug remain, or is it confusing to present scenarios where the BPV expectedly fails?]
|
||||
|
||||
It is interesting to note that the differences in power consumption patterns among the different firmware versions are visible immediately after the initial power spike.
|
||||
This suggests that future work could achieve an even lower time-to-decision, likely as low as 200 ms depending on the anomaly.
|
||||
|
||||
|
||||
// = Test Case 3: Aggregated Power Measurements
|
||||
// In some cases, capturing only the power consumption of the machine to protect is impossible.
|
||||
// For example, if the power connections follow proprietary designs, or if the machine to protect is inaccessible (for practical or security reasons).
|
||||
// In this case, the data available may be an aggregate of the consumption of multiple machines or components.
|
||||
// This global power consumption measurement is still suitable for boot process verification.
|
||||
|
||||
// This test case was conducted with an industry partner to protect a micro-pc running Windows 10.
|
||||
// The available power consumption was an aggregate of two micro-pc, one being the machine to protect.
|
||||
// The second machine remained idle for the duration of the experiment.
|
||||
// @l3-setup illustrates the setup for the data capture.
|
||||
|
||||
// Although this setup can seem simplistic or ideal, it is a first approach to evaluating the applicability of @BPV in a more complex environment.
|
||||
// The data presented in this test case come from a real installation, not from a controlled laboratory environment.
|
||||
|
||||
// #figure(
|
||||
// image("images/l3-setup.svg", width: 100%),
|
||||
// caption: [Setup for BPV with an aggregated power measurement.]
|
||||
// )<l3-setup>
|
||||
|
||||
// == Results
|
||||
|
||||
|
||||
= Specific Case Study: @AIM <aim>
|
||||
When training a model to detect outliers, it is often expected to have examples of possible anomalies.
|
||||
In some cases, gathering anomalies can be difficult, costly, or impossible.
|
||||
In the context of this study, it would be impractical to measure power consumption patterns for a wide range of firmware anomalies.
|
||||
Such data collection would require modifying firmware parameters, suspending equipment usage, or infecting production machines with malicious firmware.
|
||||
These modifications are impossible for production equipment and would still lead to an incomplete training dataset.
|
||||
To circumvent this limitation, we propose a variation of the training process called @AIM.
|
||||
@AIM leverages the specificity of the distance-based detectors.
|
||||
Distance-based detectors produce results based solely on the distance between two traces and a learned threshold.
|
||||
The threshold is chosen to separate normal and anomalous traces as well as possible.
|
||||
The actual pattern of the traces is not important for this type of detector as only the aggregated distance of each sample matters.
|
||||
This implies that a distance-based detector that relies on a distance threshold can be trained the same way with either real anomalous traces or with artificial traces that present the same distance to the reference.
|
||||
The idea behind an @AIM is to leverage this property and generate artificial anomalous traces to form the training set.
|
||||
The additional anomalous traces are generated using only normal traces, which circumvents the need for extensive data collection.
|
||||
|
||||
== Anomaly Generation
|
||||
The generation of anomalies from normal traces is based on the modification of the boot-up pattern.
|
||||
Data augmentation can leverage different time series modification methods to help a model generalize.
|
||||
The kind of modification applied to a trace is highly dependent on the application and the model @zimmering2021generating and requires domain knowledge about the system.
|
||||
In this case, we want to generate anomalous traces with patterns similar to actual anomalous traces from a machine.
|
||||
The first step of this process is to extract domain knowledge from all the traces collected.
|
||||
The types of modification an anomalous trace presents compared to a normal trace help us design anomaly generation functions that apply the same types of transformation to normal traces with varying parameters.
|
||||
The goal is not to reproduce exact anomalous traces but to generate a wide variety of possible anomalous traces given a small set of normal traces.
|
||||
|
||||
#figure(
|
||||
image("images/Bootup_traces_TPLINK.svg", width: 100%),
|
||||
caption: [
|
||||
Example of TP-Link switch boot-up traces for different firmware versions. The anomalous firmware (FIRMWARE V2) presents both a $y$ and an $x$ shift.
|
||||
],
|
||||
|
||||
)<fig-boot-up_traces_TPLINK>
|
||||
|
||||
As illustrated in @fig-boot-up_traces_TPLINK, the domain knowledge extracted from the study of anomalous traces is of two types:
|
||||
|
||||
- The trace is shifted along the $y$ axis. In this case, the anomalous firmware consumes significantly more or less power than the normal one. This shift can affect the whole trace or only a part of it. This can be the result of different usage of the machine's components or a significant change in the firmware instructions.
|
||||
- The trace is delayed or in advance along the $x$ axis. The anomalous trace presents the same patterns and amplitude as the normal trace but at different points in time. This shift can occur when parts of the firmware are added or removed by updates.
|
||||
|
||||
|
||||
The anomaly generation function combines the domain knowledge observations and applies anomalies to generate examples of anomalous traces from normal traces.
|
||||
The transformations include:
|
||||
|
||||
|
||||
- Shifting the time domain. The direction of the shift can be forward (introducing a delay) or backward (removing a delay). The parameters of the shift are the amplitude and the start time. Both parameters are randomly selected for each new trace. The boundaries of these values exclude very large shifts, as these would not contribute to the threshold placement for the models selected. The missing parts of the trace after shifting are recreated based on the average and standard deviation of the previous 0.5 s, assuming Gaussian noise.
|
||||
|
||||
- Shifting the $y$ axis. The direction of the shift can be upward (more energy consumed) or downward (less energy consumed). The amplitude is chosen between $4$ and $5$ times the standard deviation for each sample. These values avoid creating an anomalous trace that conflicts with the normal traces while excluding shifts so large that they would not contribute to the threshold placement. The start time is chosen randomly in the trace.
|
||||
|
||||
- Shifting both the $x$ and $y$ axes. Anomalous traces always present a combination of an $x$ shift, a $y$ shift, or both.
|
||||
|
||||
The anomaly generation procedure is summarized in @algo.

#figure(
  caption: [Anomaly Generation Procedure.],
  [
    + $x_"amp" arrow.l "random float" in [-4, -2] union [2, 4]$
    + $y_"amp" arrow.l "random float" in [-5, -4] union [4, 5]$
    + Generate a new trace using the data augmenter.
    + Select the direction of the shift: $x$, $y$, or both.
    + If the $y$ shift is selected, add the standard deviation multiplied by $y_"amp"$ to the new trace.
    + If the $x$ shift is selected, select the start time of the shift, shift the new trace by $x_"amp"$ seconds, and recreate the missing part based on the previous $0.5$ seconds.
    + Return the new anomalous trace.
  ],
)<algo>
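For concreteness, a Python sketch of this generation function is given below. It follows the transformations and amplitude ranges described above, while the function name, the 50 ms sample period, and the equal-probability choice between transformations are illustrative assumptions; traces are assumed to be longer than the largest time shift.

```python
import numpy as np

def generate_anomalous_trace(normal_traces, sample_period_s=0.05, rng=None):
    """Create one artificial anomalous trace from a small set of nominal traces."""
    rng = rng or np.random.default_rng()
    mean, std = normal_traces.mean(axis=0), normal_traces.std(axis=0)
    trace = rng.normal(mean, std)                 # augmented trace drawn around the nominal pattern
    n = trace.size
    apply_x, apply_y = rng.random() < 0.5, rng.random() < 0.5
    if not (apply_x or apply_y):
        apply_x = True                            # at least one transformation is always applied

    if apply_y:
        # Amplitude shift of 4 to 5 standard deviations, upward or downward, from a random start.
        y_amp = rng.choice([-1.0, 1.0]) * rng.uniform(4.0, 5.0)
        start = int(rng.integers(0, n))
        trace[start:] += y_amp * std[start:]

    if apply_x:
        # Time shift of 2 to 4 seconds, forward (delay) or backward, from a random start.
        shift = int(rng.uniform(2.0, 4.0) / sample_period_s) * int(rng.choice([-1, 1]))
        start = int(rng.integers(0, n - abs(shift)))
        tail = np.roll(trace[start:], shift)
        # Samples exposed by the shift are recreated from the statistics of the previous 0.5 s.
        context = trace[max(0, start - int(0.5 / sample_period_s)):start + 1]
        hole = slice(0, shift) if shift > 0 else slice(tail.size + shift, None)
        tail[hole] = rng.normal(context.mean(), context.std(), abs(shift))
        trace[start:] = tail
    return trace
```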
|
||||
|
||||
|
||||
|
||||
The resulting dataset does not exactly resemble the anomalous traces that are collected but presents traces within the same range of distances to the normal traces (see @fig-Synthetic_vs_Normal_TPLINK).
|
||||
To avoid introducing training biases, the dataset is balanced by generating new normal traces using the average and standard deviation if required.
|
||||
|
||||
#figure(
|
||||
image("images/Synthetic_vs_Normal_TPLINK.svg"),
|
||||
image("images/Synthetic_vs_Normal_TPLINK.svg", width: 100%),
|
||||
caption: [Example of generated synthetic anomalous traces vs normal traces for TP-Link switch.],
|
||||
|
||||
)<fig-Synthetic_vs_Normal_TPLINK>
|
||||
|
||||
== Results
|
||||
A benchmarking algorithm evaluates the performance of @AIM against the performance of the original @BPV trained with only normal traces.
|
||||
@AIM places the threshold to maximize the margin to the closest normal and abnormal distances, in the same way a 1D @SVM would.
|
||||
This is a natural extension of the @BPV when abnormal samples are available.
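A sketch of this threshold placement is shown below, assuming the artificial anomalies all lie farther from the reference than the nominal traces (names are illustrative):

```python
import numpy as np

def aim_threshold(reference, normal_traces, artificial_anomalies):
    """Place the distance threshold with the largest margin between the two classes."""
    d_normal = np.linalg.norm(normal_traces - reference, axis=1)
    d_anomalous = np.linalg.norm(artificial_anomalies - reference, axis=1)
    # Midpoint between the closest points of each class, as a 1D maximum-margin separator.
    return (d_normal.max() + d_anomalous.min()) / 2.0
```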
|
||||
|
||||
Two main parameters are important to tune for the @AIM.
|
||||
First, the range for the length of the $x$ shift, and especially its lower bound, has an important influence on the generated anomalies.
|
||||
A small lower bound allows the generation of anomalous traces that closely resemble the nominal traces, which can result in a sub-optimal threshold placement.
|
||||
Second, the range parameter for the $y$ shift affects the results in the same way.
|
||||
The values for these parameters are chosen as part of the domain knowledge extraction and they affect the transferability of the model (see @aim-conclusion).
|
||||
|
||||
The performance is evaluated on the same dataset as for the initial @BPV evaluation (see~@exp-network).
|
||||
The performance metric is the $F_1$ score.
|
||||
The final performance measure is the average $F_1$ score (and its standard deviation) over 30 independent runs.
|
||||
Each run selects five random normal traces as seeds for the dataset generation.
|
||||
The generated dataset is composed of 100 training traces and 100 evaluation traces.
|
||||
|
||||
|
||||
The results are presented in @tab-aim.
|
||||
|
||||
#figure(
|
||||
table(
|
||||
columns:(33%,33%,33%),
|
||||
[*Machine*], [*BPV*], [*AIM*],
|
||||
[HP-SWITCH],[$0.895 plus.minus 0.094$],[$0.657 plus.minus 0.394$],
|
||||
[TPLINK-SWITCH], [$0.9 plus.minus 0.084$],[$0.985 plus.minus 0.035$],
|
||||
[WAP-ASUS], [$1.0 plus.minus 0.0$],[$0.987 plus.minus 0.041$],
|
||||
[WAP-LINKSYS],[$0.882 plus.minus 0.099$],[$0.867 plus.minus 0.098$],
|
||||
),
|
||||
caption: [Performances of the @AIM model compared with the original @BPV model (average F1 score #sym.plus.minus std).]
|
||||
)<tab-aim>
|
||||
|
||||
== Conclusion on the @AIM Model<aim-conclusion>
|
||||
|
||||
The @AIM model produces mixed results.
|
||||
The model was tuned for the TPLINK-SWITCH machine and produces significantly better results for this machine.
|
||||
However, the results did not transfer well to the other machines.
|
||||
Experiments reveal that the parameter values that produce the best results can differ significantly from one machine to another, even for the same type of machine.
|
||||
The idea of introducing artificial anomalous examples in the training dataset is valid and can indeed enable the creation of a better model.
|
||||
This artificial augmentation of the training set is especially interesting in the context of rare events where creating an extensive dataset is expensive.
|
||||
However, the lack of transferability of the proposed method indicates that further work is required to evolve @AIM into a clearly better solution than @BPV.
|
||||
|
||||
= Discussion<discussion>
|
||||
#agd["Add subsection"]
|
||||
|
||||
This study only evaluates the performance of this method on a mix of consumer and enterprise networking devices.
|
||||
The results give us great confidence that a side-channel @IDS can be deployed to other types of equipment.
|
||||
This technology is, in theory, applicable to any embedded system. Depending on the type of machine, tapping the power consumption or identifying a reliable boot-up sequence can be difficult.
|
||||
Further studies will aim at illustrating the potential of side-channel firmware verification on a wider range of machines.
|
||||
Further studies could also improve the detection decision derived from the model's output, for example by combining the outputs of multiple models focused on specific time-series features into a more reliable and generic detection model.
|
||||
|
||||
== Limitations of Anomaly Generation
|
||||
== Hyper-Parameters
|
||||
#agd[already discussed in the first experiment, watch out for double infos]
|
||||
|
||||
== Extraction of Synchronized Boot-up Traces<sds>
|
||||
|
||||
A threshold-based algorithm extracts the boot-up sequences from the complete trace.
|
||||
The extraction is not performed manually because of the large number of samples and to ensure a consistent detection of the boot-up pattern and a precise alignment of the different sequences extracted.
|
||||
Because the boot-up sequence usually begins with a sharp increase in power consumption from the device, the algorithm leverages this rising edge to detect the start time accurately.
|
||||
Two parameters control the extraction.
|
||||
$T$ is the consumption threshold, and $L$ is the length of the boot-up sequence.
|
||||
To extract all the boot-up sequences in a power trace, the algorithm evaluates consecutive samples against $T$.
|
||||
If sample $s_(i-1) < T$ and $s_i > T$, then $s_i$ is the first sample of a boot-up sequence, and the next $L$ samples are extracted.
|
||||
The power trace is resampled at $50"ms"$ using a median aggregating function to avoid any incorrect detections.
|
||||
This pre-processing removes most of the impulse noise that could falsely trigger the detection method.
|
||||
The final step of the detection is to store all the boot sequences under the same label for evaluation.
|
||||
The complete dataset corresponding to this experiment is available online @dataset.
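A minimal sketch of this extraction is given below; it assumes the power trace has already been resampled to 50 ms bins with a median aggregation, and skipping past each extracted sequence is an added simplification.

```python
import numpy as np

def extract_bootups(resampled_trace, T, L):
    """Extract every boot-up sequence from a resampled power trace.

    resampled_trace: 1D array of median-aggregated 50 ms samples.
    T: consumption threshold that detects the rising edge of a boot-up.
    L: number of samples kept for each boot-up sequence.
    """
    sequences = []
    i = 1
    while i < len(resampled_trace) - L:
        if resampled_trace[i - 1] < T and resampled_trace[i] > T:   # rising edge crossing T
            sequences.append(resampled_trace[i:i + L])
            i += L                                                  # skip past the extracted sequence
        else:
            i += 1
    return np.array(sequences)
```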
|
||||
|
||||
== Support for Online Training
|
||||
In order for the @BPV to integrate into a realistic environment, the training procedure takes the rarity of boot-up events into account.
|
||||
Once the measurement device is set up on the machine to protect, the streaming time series representing the power consumption serves as input for the boot-up detection algorithm (see @sds).
|
||||
Each boot-up event is extracted and added to a dataset of boot-up traces.
|
||||
Once the dataset reaches the expected number of samples, the @BPV computes the threshold and is ready to validate the next boot-up.
|
||||
The complete training and validation procedures require no human interaction.
|
||||
|
||||
In the case of a multi-modal model, the training procedure requires one human interaction.
|
||||
Presented with the boot-up samples, an operator can transform the model into a multi-modal model by separating the training samples into multiple modes.
|
||||
Once the separation is performed, the training procedure resumes without interaction, and the next boot-up samples are assigned to the closest mode.
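The sketch below outlines this online procedure; the reference computed as a point-wise mean, the buffer size, and the class layout are illustrative assumptions, not the deployed implementation.

```python
import numpy as np

class OnlineBPV:
    """Accumulate boot-up traces from the stream, then train and validate automatically."""

    def __init__(self, expected_samples=10):
        self.expected_samples = expected_samples
        self.dataset = []          # boot-up traces collected during the training phase
        self.modes = None          # list of (reference, threshold) pairs once trained

    def add_bootup(self, trace):
        """Called for every boot-up extracted from the streaming power measurement."""
        if self.modes is None:
            self.dataset.append(trace)
            if len(self.dataset) >= self.expected_samples:
                self.train([self.dataset])                 # single mode by default
            return None
        return self.validate(trace)

    def split_modes(self, groups):
        """The single optional operator interaction: re-train with samples split per mode."""
        self.train(groups)

    def train(self, groups):
        self.modes = []
        for traces in groups:
            traces = np.asarray(traces)
            reference = traces.mean(axis=0)
            distances = np.linalg.norm(traces - reference, axis=1)
            q1, q3 = np.percentile(distances, [25, 75])
            self.modes.append((reference, 1.5 * (q3 - q1)))

    def validate(self, trace):
        """Nominal if the boot-up matches at least one trained mode."""
        return any(np.linalg.norm(trace - ref) <= thr for ref, thr in self.modes)
```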
|
||||
|
||||
Thanks to its low complexity and support for multiple modes, the @BPV can adapt during training to changes in the training data and supports switching between single-mode and multi-modal operation.
|
||||
|
||||
= Conclusion<conclusion>
|
||||
|
||||
This study illustrates the applicability of side-channel analysis to detect firmware attacks.
|
||||
The proposed side-channel-based @IDS can detect firmware tampering from the power consumption trace.
|
||||
Moreover, the distance-based models leveraged in this study require minimal training data and training time.
|
||||
On a per-machine basis, anomaly generation can enhance the training set without additional anomalous data capture.
|
||||
Finally, deploying this technology to production networking equipment requires minimal downtime and hardware intrusion, and it is applicable to clientless equipment.
|
||||
This study illustrates the potential of independent, side-channel-based @IDS for the detection of low-level attacks that can compromise machines even before the operating system is loaded.
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@
|
|||
[#footnote[agd: #content]]
|
||||
}
|
||||
|
||||
|
||||
#let cn = text(purple,size:15pt)[ #super[[citation needed]]]
|
||||
|
||||
// problem statement environment
|
||||
// Usage:
|
||||
|
|
@ -20,3 +20,45 @@
|
|||
])
|
||||
]
|
||||
|
||||
#let reset-acronym(term) = {
|
||||
// Reset a specific acronym. It will be expanded on next use.
|
||||
if term in acronyms{
|
||||
state("acronym-state-" + term, false).update(false)
|
||||
}
|
||||
}
|
||||
|
||||
#let reset-all-acronyms() = {
|
||||
// Reset all acronyms. They will all be expanded on next use.
|
||||
for term in acronyms.keys() {
|
||||
state("acronym-state-" + term, false).update(false)
|
||||
}
|
||||
}
|
||||
|
||||
// Show rule to paste in the document for the acronym generation to work.
|
||||
// #show ref: r =>{// Overload the reference definition
|
||||
|
||||
// // Grab the term, target of the reference
|
||||
// let term = if type(r.target) == "label"{
|
||||
// str(r.target)
|
||||
// }
|
||||
// else{
|
||||
// // I don't know why the target could not be a type label but it is handled
|
||||
// none
|
||||
// }
|
||||
|
||||
// if term in acronyms{
|
||||
// // Grab definition of the term
|
||||
// let definition = acronyms.at(term)
|
||||
// // Generate the key associated with this term
|
||||
// let state-key = "acronym-state-" + term
|
||||
// // Create a state to keep track of the expansion of this acronym
|
||||
// state(state-key,false).display(seen => {if seen{term}else{[#definition (#term)]}})
|
||||
// // Update state to true as it has just been defined
|
||||
// state(state-key, false).update(true)
|
||||
// }
|
||||
// else{
|
||||
// r
|
||||
// }
|
||||
// }
|
||||
|
||||
|
||||
|
|
|
|||