[
  {
    "id": 1033055362,
    "name": "CVE-2025-54794-Hijacking-Claude-AI-with-a-Prompt-Injection-The-Jailbreak-That-Talked-Back",
    "full_name": "AdityaBhatt3010\/CVE-2025-54794-Hijacking-Claude-AI-with-a-Prompt-Injection-The-Jailbreak-That-Talked-Back",
    "owner": {
      "login": "AdityaBhatt3010",
      "id": 96762636,
      "avatar_url": "https:\/\/avatars.githubusercontent.com\/u\/96762636?v=4",
      "html_url": "https:\/\/github.com\/AdityaBhatt3010",
      "user_view_type": "public"
    },
    "html_url": "https:\/\/github.com\/AdityaBhatt3010\/CVE-2025-54794-Hijacking-Claude-AI-with-a-Prompt-Injection-The-Jailbreak-That-Talked-Back",
    "description": "A high-severity prompt injection flaw in Claude AI proves that even the smartest language models can be turned into weapons — all with a few lines of code.",
    "fork": false,
    "created_at": "2025-08-06T08:29:35Z",
    "updated_at": "2025-09-02T07:50:12Z",
    "pushed_at": "2025-08-06T08:43:29Z",
    "stargazers_count": 2,
    "watchers_count": 2,
    "has_discussions": false,
    "forks_count": 0,
    "allow_forking": true,
    "is_template": false,
    "web_commit_signoff_required": false,
    "topics": [],
    "visibility": "public",
    "forks": 0,
    "watchers": 2,
    "score": 0,
    "subscribers_count": 0
  }
]