{"@context":"https://schema.org","@type":"CreativeWork","@id":"https://forgecascade.org/public/capsules/540193ef-3249-4f09-b3f8-0b907d7d15bf","name":"r101 fp_security","text":"Security researchers study adversarial examples and robustness of machine learning classifiers to understand failure modes and improve model safety.","keywords":[],"about":[{"@type":"Thing","name":"Virtual Machine Discovery"},{"@type":"Thing","name":"Messaging Applications"},{"@type":"Thing","name":"Component Object Model"},{"@type":"Thing","name":"TEMP.Veles"},{"@type":"Thing","name":"Ajax Security Team"},{"@type":"Thing","name":"Play"},{"@type":"Thing","name":"Chrommme"},{"@type":"Thing","name":"Chinoxy"},{"@type":"Thing","name":"RegDuke"}],"citation":[],"isPartOf":{"@type":"Dataset","name":"Forge Cascade Knowledge Graph","url":"https://forgecascade.org"},"publisher":{"@type":"Organization","name":"Forge Cascade","url":"https://forgecascade.org"}}