{"package":"flash-attn-4","ecosystem":"pypi","latest_version":"4.0.0b9","description":"Flash Attention CUTE (CUDA Template Engine) implementation","license":"BSD 3-Clause License","homepage":"https://pypi.org/project/flash-attn-4/","repository":"https://github.com/Dao-AILab/flash-attention","downloads_weekly":79937,"health":{"score":76,"risk":"moderate","breakdown":{"maintenance":25,"popularity":10,"security":25,"maturity":6,"community":10},"deprecated":false,"max_score":100},"vulnerabilities":{"count":0,"critical":0,"high":0,"medium":0,"low":0,"details":[]},"versions":{"latest":"4.0.0b9","total_count":7,"recent":["0.0.1","4.0.0b3","4.0.0b4","4.0.0b5","4.0.0b7","4.0.0b8","4.0.0b9"]},"metadata":{"deprecated":false,"deprecated_message":null,"maintainers_count":1,"first_published":null,"last_published":"2026-04-15T08:41:53.440119Z","dependencies_count":11,"dependencies":["nvidia-cutlass-dsl>=4.4.2","torch","einops","typing_extensions","apache-tvm-ffi<0.2,>=0.1.5","torch-c-dlpack-ext","quack-kernels>=0.3.3","nvidia-cutlass-dsl[cu13]>=4.4.2; extra == \"cu13\"","pytest; extra == \"dev\"","pytest-xdist; extra == \"dev\"","ruff; extra == \"dev\""]},"bundle":null,"typescript":null,"known_issues":{"bugs_count":0,"bugs_severity":{},"status_breakdown":{},"link":null,"scope":"none"},"recommendation":{"action":"safe_to_use","issues":[],"use_version":"4.0.0b9","version_hint":null,"summary":"flash-attn-4@4.0.0b9 is safe to use (health: 76/100)"},"requested_version":null,"_cache":"miss","_response_ms":637,"_powered_by":"depscope.dev — free package intelligence for AI agents","typosquat":{"is_suspected":false},"maintainer_trust":{"available":false},"malicious":{"is_malicious":false},"scorecard":{"available":false},"quality":{"available":false}}