{"package":"ado-vllm-performance","ecosystem":"pypi","latest_version":"1.8.0","description":"VLLM performance testing actuator for ado","license":"","license_risk":"unknown","commercial_use_notes":"No license declared in registry metadata — verify manually before commercial use.","homepage":"https://pypi.org/project/ado-vllm-performance/","repository":"https://github.com/IBM/ado","downloads_weekly":135,"health":{"score":62,"risk":"moderate","breakdown":{"maintenance":25,"popularity":3,"security":25,"maturity":9,"community":0},"deprecated":false,"max_score":100},"vulnerabilities":{"count":0,"critical":0,"high":0,"medium":0,"low":0,"details":[]},"versions":{"latest":"1.8.0","total_count":12,"recent":["1.2.1","1.2.2","1.3.0","1.3.1","1.3.2","1.3.3","1.4.0","1.4.1","1.5.0","1.6.0","1.7.0","1.8.0"]},"metadata":{"deprecated":false,"deprecated_message":null,"maintainers_count":0,"first_published":null,"last_published":"2026-04-27T12:41:48.525514Z","dependencies_count":5,"dependencies":["ado-core","datasets>=2.20.0","kubernetes>=31.0.0","guidellm>=0.5.3; extra == \"guidellm\"","vllm!=0.15.*,>=0.12.0; sys_platform != \"darwin\" and extra == \"vllm\""]},"github_stats":null,"bundle":null,"typescript":null,"known_issues":{"bugs_count":0,"bugs_severity":{},"status_breakdown":{},"link":null,"scope":"none"},"historical_compromise":null,"recommendation":{"action":"use_with_caution","issues":["Moderate health score (62/100) — verify manually"],"use_version":"1.8.0","version_hint":null,"summary":"ado-vllm-performance@1.8.0 low health (62/100) — consider alternatives"},"version_scoped":null,"_meta":{"endpoint":"check","tier":"full","philosophy":"DepScope is free. Use the cheapest endpoint that answers your real question.","cheaper_alternatives":[{"endpoint":"/api/exists/pypi/ado-vllm-performance","tokens_estimated":12,"use_when":"you only need to know if the package exists (hallucination guard)"},{"endpoint":"/api/health/pypi/ado-vllm-performance","tokens_estimated":80,"use_when":"you only need a 0-100 score for go/no-go (>=70 = safe)"},{"endpoint":"/api/prompt/pypi/ado-vllm-performance","tokens_estimated":280,"use_when":"you want a plain-text LLM-friendly brief instead of JSON"},{"endpoint":"POST /api/check_bulk","tokens_estimated":60,"use_when":"you have 5+ packages to check; sends one round-trip instead of N"}],"docs":"https://depscope.dev/integrate","hint_bulk":"You've called /api/check 59 times in 60s. Save bandwidth + tokens with POST /api/check_bulk (1 round-trip for N pkgs)."},"requested_version":null,"_cache":"hit","_response_ms":0,"_powered_by":"depscope.dev — free package intelligence for AI agents","typosquat":{"is_suspected":false},"maintainer_trust":{"available":false},"malicious":{"is_malicious":false},"scorecard":{"available":false},"quality":{"available":false},"version_history_summary":{"total_versions":12,"first_release_age_days":null,"last_release_days_ago":5,"avg_days_between_releases":null,"release_velocity":"active"}}