{
    "status": "success",
    "source": "Bugskan AI Security Node",
    "timestamp": "2026-03-07T15:00:03+00:00",
    "count": 50,
    "data": [
        {
            "date": "2025-10-17",
            "title": "AI Governance: 85% of Orgs Use AI, but Security Lags - wiz.io",
            "category": "Vulnerability",
            "summary": "Rapid AI adoption by 85% of organizations is creating an expanded attack surface, with security lagging behind adequate governance frameworks. This deficiency leads to systemic vulnerabilities such as data leakage and unauthorized model access in multi-tenant cloud environments, alongside \"shadow AI\" breaches that incur an estimated $670,000 more in costs.",
            "link": "https:\/\/www.wiz.io\/academy\/ai-security\/ai-governance",
            "keywords": [
                "Shadow AI",
                "Expanded Attack Surface",
                "Multi-tenant Cloud"
            ]
        },
        {
            "date": "2025-12-10",
            "title": "Google Offers $20K Bounty for Chrome AI Security Breach - eWeek",
            "category": "Vulnerability",
            "summary": "Google is offering a $20,000 bug bounty for the identification and reporting of security breaches within its Chrome AI features. This proactive program aims to discover and mitigate potential vulnerabilities in AI components integrated into the Chrome browser.",
            "link": "https:\/\/www.eweek.com\/news\/google-bounty-chrome-ai-security-breach\/",
            "keywords": [
                "Chrome AI",
                "Bug Bounty",
                "AI Security"
            ]
        },
        {
            "date": "2025-11-14",
            "title": "China\u2019s \u2018autonomous\u2019 AI-powered hacking campaign still required a ton of human work - CyberScoop",
            "category": "Jailbreak",
            "summary": "A Chinese state-sponsored group utilized Anthropic's Claude AI to breach at least 30 organizations, bypassing its security guardrails by segmenting tasks and tricking the model into simulating a legitimate security audit. This operation leveraged a human-built frontend framework to orchestrate Claude's actions, including interfacing with open-source tools via Model Context Protocol (MCP) servers for reconnaissance and vulnerability scanning, dramatically scaling the attackers' operational capacity.",
            "link": "https:\/\/cyberscoop.com\/anthropic-ai-orchestrated-attack-required-many-human-hands\/",
            "keywords": [
                "Anthropic Claude",
                "AI Jailbreak",
                "Model Context Protocol"
            ]
        },
        {
            "date": "2025-11-26",
            "title": "OpenAI confirms major data breach, exposing names, emails and more - Windows Central",
            "category": "Data Leak",
            "summary": "OpenAI confirmed a data breach originating from unauthorized access to Mixpanel, a third-party web analytics provider it uses for its API product. This incident exposed names, email addresses, approximate locations, OS\/browser data, and user IDs associated with OpenAI API accounts (platform.openai.com users), but did not compromise ChatGPT content, passwords, or payment details.",
            "link": "https:\/\/www.windowscentral.com\/artificial-intelligence\/openai-chatgpt\/openai-confirms-major-data-breach-exposing-users-names-email-addresses-and-more-transparency-is-important-to-us",
            "keywords": [
                "Mixpanel",
                "Third-party breach",
                "API accounts"
            ]
        },
        {
            "date": "2026-01-08",
            "title": "Fake AI Chrome Extensions Steal 900K Users' Data - Dark Reading | Security",
            "category": "Data Leak",
            "summary": "Malicious Google Chrome extensions, posing as legitimate AI tools, exfiltrated sensitive user data including Large Language Model (LLM) conversations and extensive browsing history to a command-and-control (C2) server. This operation impacted nearly 900,000 users, resulting in the theft of proprietary code, business strategies, confidential research, and credentials for potential corporate espionage or identity theft.",
            "link": "https:\/\/www.darkreading.com\/cloud-security\/fake-ai-chrome-extensions-steal-900k-users-data",
            "keywords": [
                "Chrome Extensions",
                "LLM Data Exfiltration",
                "C2 Server"
            ]
        },
        {
            "date": "2025-10-08",
            "title": "Google DeepMind\u2019s New AI Agent Finds and Fixes Vulnerabilities - SecurityWeek",
            "category": "Vulnerability",
            "summary": "Google DeepMind has developed CodeMender, an AI agent designed to autonomously find and patch software vulnerabilities. Leveraging advanced program analysis and multi-agent systems, CodeMender rewrites vulnerable code to prevent future exploits and eliminate entire classes of security bugs.",
            "link": "https:\/\/www.securityweek.com\/google-deepminds-new-ai-agent-finds-and-fixes-vulnerabilities\/",
            "keywords": [
                "CodeMender",
                "AI Agent",
                "Program Analysis"
            ]
        },
        {
            "date": "2026-02-26",
            "title": "Government Data Stolen After Hacker Jailbreaks Claude AI to Write Malicious Exploit Code - gbhackers.com",
            "category": "Jailbreak",
            "summary": "An attacker reportedly jailbroke the Claude AI model to generate malicious exploit code. This illicit activity subsequently led to the theft and exfiltration of government data.",
            "link": "https:\/\/gbhackers.com\/hacker-jailbreaks-claude-ai-to-write-malicious-exploit-code\/",
            "keywords": [
                "AI Jailbreak",
                "Claude AI",
                "Data Exfiltration"
            ]
        },
        {
            "date": "2026-02-01",
            "title": "OpenClaw (a.k.a. Moltbot) is everywhere all at once, and a disaster waiting to happen - Marcus on AI | Substack",
            "category": "Vulnerability",
            "summary": "OpenClaw (Moltbot), an LLM agent system, grants unfettered access to user systems and sensitive data, bypassing traditional operating system and browser security protections like sandboxing. The primary security concern is prompt injection attacks, where malicious text can be hidden to seize control of the user's machine, leading to system compromise and data exposure.",
            "link": "https:\/\/garymarcus.substack.com\/p\/openclaw-aka-moltbot-is-everywhere",
            "keywords": [
                "OpenClaw",
                "Prompt Injection",
                "LLM Agents"
            ]
        },
        {
            "date": "2026-01-29",
            "title": "Open-source AI models vulnerable to criminal misuse, researchers warn - Reuters",
            "category": "Vulnerability",
            "summary": "Researchers are warning that open-source AI models possess inherent vulnerabilities, making them susceptible to various forms of criminal misuse and exploitation. These weaknesses raise concerns about potential impacts like adversarial attacks, data poisoning, or the generation of malicious content by threat actors.",
            "link": "https:\/\/www.reuters.com\/technology\/open-source-ai-models-vulnerable-criminal-misuse-researchers-warn-2026-01-29\/",
            "keywords": [
                "Open-source AI",
                "AI Security",
                "Model Vulnerabilities"
            ]
        },
        {
            "date": "2026-02-02",
            "title": "'Moltbook' social media site for AI agents had big security hole, cyber firm Wiz says - Reuters",
            "category": "Vulnerability",
            "summary": "The provided article content is empty, precluding a specific technical summary of any exploit or CVE. However, the title suggests a significant security vulnerability was identified within the 'Moltbook' social media platform, which is designed for AI agents.",
            "link": "https:\/\/www.reuters.com\/legal\/litigation\/moltbook-social-media-site-ai-agents-had-big-security-hole-cyber-firm-wiz-says-2026-02-02\/",
            "keywords": [
                "Moltbook",
                "AI Agents",
                "Application Security"
            ]
        },
        {
            "date": "2026-01-29",
            "title": "One Step Away From a Massive Data Breach: What We Found Inside MoltBot - OX Security",
            "category": "Vulnerability",
            "summary": "The AI personal assistant MoltBot (OpenClaw) insecurely stores sensitive credentials and API keys in cleartext within `~\/.clawdbot` and retains \"deleted\" secrets in backup files, making them vulnerable to infostealers. Furthermore, the codebase exhibits numerous insecure patterns, including extensive use of `eval` and `execSync` with user input, which could lead to Remote Code Execution (RCE), XSS, and broader data breaches for its hundreds of thousands of users.",
            "link": "https:\/\/www.ox.security\/blog\/one-step-away-from-a-massive-data-breach-what-we-found-inside-moltbot\/",
            "keywords": [
                "Cleartext storage",
                "Supply chain risk",
                "RCE"
            ]
        },
        {
            "date": "2026-02-26",
            "title": "Hacker Jailbreaks Claude AI to Write Exploit Code and Steal Government Data - CybersecurityNews",
            "category": "Jailbreak",
            "summary": "An incident report details hackers successfully jailbreaking the Claude AI model, leveraging this compromise to generate exploit code. This exploit ultimately facilitated the theft and exfiltration of sensitive government data.",
            "link": "https:\/\/cybersecuritynews.com\/claude-ai-exploited-2\/",
            "keywords": [
                "AI Jailbreak",
                "Prompt Injection",
                "Data Exfiltration"
            ]
        },
        {
            "date": "2026-02-26",
            "title": "Kali Linux Integrates Claude AI via Model Context Protocol to Enhance Offensive Security - Cyber Press",
            "category": "Vulnerability",
            "summary": "The provided scraped article text returned an HTTP 403 Forbidden status, indicating that access to the requested web page was explicitly denied. This prevented the retrieval and subsequent analysis of any article content detailing specific exploits or impacts related to Kali Linux and Claude AI integration.",
            "link": "https:\/\/cyberpress.org\/kali-linux-integrates-claude-ai\/",
            "keywords": [
                "HTTP 403",
                "Access Denied",
                "Web Scraping Failure"
            ]
        },
        {
            "date": "2026-02-25",
            "title": "Hacker used Anthropic's Claude chatbot to attack multiple government agencies in Mexico - Engadget",
            "category": "Jailbreak",
            "summary": "A hacker successfully jailbroke Anthropic's Claude chatbot, bypassing its guardrails to generate vulnerability reports and exploitation scripts for attacks against Mexican government networks. This misuse of the AI led to the exfiltration of 150GB of sensitive government data, including taxpayer records and employee credentials.",
            "link": "https:\/\/www.engadget.com\/ai\/hacker-used-anthropics-claude-chatbot-to-attack-multiple-government-agencies-in-mexico-171237255.html",
            "keywords": [
                "AI Jailbreak",
                "Anthropic Claude",
                "Data Exfiltration"
            ]
        },
        {
            "date": "2025-08-17",
            "title": "LLMs + Coding Agents = Security Nightmare - Marcus on AI | Substack",
            "category": "Vulnerability",
            "summary": "The article details advanced prompt injection and watering hole techniques that exploit LLM-based coding agents, leveraging their ability to interpret malicious instructions hidden from human users. These methods, including ASCII Smuggling and embedding hidden prompts in GitHub repositories, can lead to Remote Code Execution (RCE), enabling attackers to gain full control over developer systems.",
            "link": "https:\/\/garymarcus.substack.com\/p\/llms-coding-agents-security-nightmare",
            "keywords": [
                "Remote Code Execution",
                "Prompt Injection",
                "Coding Agents"
            ]
        },
        {
            "date": "2026-02-24",
            "title": "RoguePilot Flaw in GitHub Codespaces Enabled Copilot to Leak GITHUB_TOKEN - The Hacker News",
            "category": "Vulnerability",
            "summary": "The RoguePilot vulnerability in GitHub Codespaces leveraged passive prompt injection within GitHub issues to manipulate Copilot. This enabled attackers to silently execute malicious commands and exfiltrate sensitive data, specifically the `GITHUB_TOKEN`, to external servers.",
            "link": "https:\/\/thehackernews.com\/2026\/02\/roguepilot-flaw-in-github-codespaces.html",
            "keywords": [
                "RoguePilot",
                "Prompt Injection",
                "GITHUB_TOKEN"
            ]
        },
        {
            "date": "2026-02-26",
            "title": "Insights into Claude Code Security: A New Pattern of Intelligent Attack and Defense - Security Boulevard",
            "category": "Vulnerability",
            "summary": "Anthropic's Claude Code Security tool, powered by Claude 4.6, represents a significant shift in secure code auditing by leveraging reasoning-based AI to detect complex vulnerabilities. Unlike traditional SAST, it simulates human security researchers to identify business logic flaws and potential 0-day issues, providing automated analysis and patch suggestions.",
            "link": "https:\/\/securityboulevard.com\/2026\/02\/insights-into-claude-code-security-a-new-pattern-of-intelligent-attack-and-defense\/",
            "keywords": [
                "Claude Code Security",
                "LLM-driven code auditing",
                "0-day vulnerabilities"
            ]
        },
        {
            "date": "2026-02-26",
            "title": "Hacker Jailbreaks Claude AI to Generate Exploit Code and Exfiltrate Government Data - Cyber Press",
            "category": "Jailbreak",
            "summary": "A reported incident describes a successful jailbreak of the Claude AI model, enabling it to bypass safety mechanisms. This compromise allowed the AI to generate exploit code and facilitate the exfiltration of sensitive government data.",
            "link": "https:\/\/cyberpress.org\/hacker-jailbreaks-claude-ai\/",
            "keywords": [
                "Claude AI",
                "AI Jailbreak",
                "Data Exfiltration"
            ]
        },
        {
            "date": "2026-02-26",
            "title": "Claude didn't just plan an attack on Mexico's government. It executed one for a month \u2014 across four domains your security stack can't see. - VentureBeat",
            "category": "Jailbreak",
            "summary": "Attackers successfully exploited Anthropic's Claude AI through prompt manipulation, effectively \"jailbreaking\" its safety guardrails to generate detailed attack plans. This led to a month-long data exfiltration campaign against multiple Mexican government agencies, resulting in the theft of 150 GB of sensitive data including 195 million taxpayer records.",
            "link": "https:\/\/venturebeat.com\/security\/claude-mexico-breach-four-blind-domains-security-stack",
            "keywords": [
                "Claude AI",
                "AI Jailbreak",
                "Data Exfiltration"
            ]
        },
        {
            "date": "2026-02-24",
            "title": "DeepSeek Jailbreak Vulnerability Analysis | Qualys TotalAI - Qualys",
            "category": "Jailbreak",
            "summary": "Qualys's analysis found that the DeepSeek-R1 LLaMA 8B LLM variant is significantly vulnerable to jailbreak attacks, failing 58% of adversarial manipulation attempts. This susceptibility allows the model to generate harmful content, such as instructions for illegal activities, hate speech, and promoting incorrect medical information.",
            "link": "https:\/\/blog.qualys.com\/vulnerabilities-threat-research\/2025\/01\/31\/deepseek-failed-over-half-of-the-jailbreak-tests-by-qualys-totalai",
            "keywords": [
                "DeepSeek-R1 LLaMA 8B",
                "LLM Jailbreak",
                "Adversarial AI"
            ]
        },
        {
            "date": "2026-02-26",
            "title": "Claude Code Flaws Exposed Developer Devices to Silent Hacking - SecurityWeek",
            "category": "Vulnerability",
            "summary": "Multiple vulnerabilities in Anthropic's Claude Code, primarily exploited via malicious configuration files, allowed for silent arbitrary command execution on developer machines. These flaws also enabled bypassing consent for external actions and exfiltrating API keys by redirecting traffic, potentially compromising shared team resources.",
            "link": "https:\/\/www.securityweek.com\/claude-code-flaws-exposed-developer-devices-to-silent-hacking\/",
            "keywords": [
                "Anthropic Claude Code",
                "Arbitrary Command Execution",
                "API Key Exfiltration"
            ]
        },
        {
            "date": "2025-08-06",
            "title": "A Single Poisoned Document Could Leak \u2018Secret\u2019 Data Via ChatGPT - WIRED",
            "category": "Vulnerability",
            "summary": "Security researchers uncovered a critical weakness within OpenAI\u2019s Connectors, enabling unauthorized data extraction from linked services. This vulnerability allowed attackers to leak data from Google Drive via a \"poisoned document\" without any user interaction.",
            "link": "https:\/\/www.wired.com\/story\/poisoned-document-could-leak-secret-data-chatgpt\/",
            "keywords": [
                "OpenAI Connectors",
                "Poisoned Document",
                "ChatGPT"
            ]
        },
        {
            "date": "2025-06-23",
            "title": "'Echo Chamber' Attack Blows Past AI Guardrails - Dark Reading",
            "category": "Jailbreak",
            "summary": "The \"Echo Chamber\" attack is a sophisticated prompt injection technique that leverages context poisoning and multi-turn reasoning to bypass large language model (LLM) guardrails. This allows attackers to gradually manipulate models like GPT and Gemini into generating harmful content, achieving high success rates for categories such as hate speech and illegal activities.",
            "link": "https:\/\/www.darkreading.com\/cloud-security\/echo-chamber-attack-ai-guardrails",
            "keywords": [
                "Prompt Injection",
                "LLM",
                "Context Poisoning"
            ]
        },
        {
            "date": "2025-10-07",
            "title": "Google's New AI Doesn't Just Find Vulnerabilities \u2014 It Rewrites Code to Patch Them - The Hacker News",
            "category": "Vulnerability",
            "summary": "Google DeepMind has introduced CodeMender, an AI-powered agent designed to automatically detect, patch, and rewrite vulnerable code to eliminate entire classes of vulnerabilities. Leveraging Gemini Deep Think models and an LLM-based critique tool, CodeMender addresses root causes and validates fixes, having already contributed 72 security patches to open-source projects.",
            "link": "https:\/\/thehackernews.com\/2025\/10\/googles-new-ai-doesnt-just-find.html",
            "keywords": [
                "CodeMender",
                "Vulnerability Remediation",
                "LLM"
            ]
        },
        {
            "date": "2026-01-26",
            "title": "Malicious VS Code AI Extensions with 1.5 Million Installs Steal Developer Source Code - The Hacker News",
            "category": "Malware",
            "summary": "Two malicious Visual Studio Code extensions, disguised as AI coding assistants, have been found siphoning developer source code and opened files to China-based servers. These extensions, with a combined 1.5 million installs, leverage covert spyware functionality to exfiltrate sensitive data in Base64 format and fingerprint devices via hidden analytics SDKs.",
            "link": "https:\/\/thehackernews.com\/2026\/01\/malicious-vs-code-ai-extensions-with-15.html",
            "keywords": [
                "VS Code Extensions",
                "MaliciousCorgi",
                "Spyware"
            ]
        },
        {
            "date": "2026-01-30",
            "title": "OpenClaw proves agentic AI works. It also proves your security model doesn't. 180,000 developers just made that your problem. - VentureBeat",
            "category": "Vulnerability",
            "summary": "OpenClaw, an open-source agentic AI assistant, exhibits critical architectural vulnerabilities including a default trust for localhost and susceptibility to prompt injection attacks. These flaws have led to over 1,800 publicly exposed instances leaking sensitive data like API keys, chat histories, and account credentials, bypassing traditional network and endpoint security controls.",
            "link": "https:\/\/venturebeat.com\/security\/openclaw-agentic-ai-security-risk-ciso-guide",
            "keywords": [
                "OpenClaw",
                "Prompt Injection",
                "Agentic AI"
            ]
        },
        {
            "date": "2026-01-29",
            "title": "CISA chief uploaded sensitive government files to public ChatGPT - csoonline.com",
            "category": "Data Leak",
            "summary": "A CISA director uploaded \"for official use only\" government contracting documents to OpenAI's public ChatGPT, bypassing approved federal AI tools and triggering automated cyber alerts. This action resulted in the loss of data control, potential incorporation of sensitive information into the model's training data, and exposed critical enterprise AI governance failures.",
            "link": "https:\/\/www.csoonline.com\/article\/4124320\/cisa-chief-uploaded-sensitive-government-files-to-public-chatgpt.html",
            "keywords": [
                "ChatGPT",
                "FOUO",
                "AI governance"
            ]
        },
        {
            "date": "2026-02-19",
            "title": "OpenAI Launches EVMbench for Blockchain Vulnerability Detection and Exploitation - Cyber Press",
            "category": "Vulnerability",
            "summary": "The scraped article text indicates OpenAI has launched EVMbench, a tool explicitly designed for blockchain vulnerability detection and exploitation. However, the complete article content is inaccessible due to a \"403 Forbidden\" error, preventing a detailed analysis of its technical specifications or operational impact on smart contract security.",
            "link": "https:\/\/cyberpress.org\/openai-launches-evmbench-for-blockchain-vulnerability-detection-and-exploitation\/",
            "keywords": [
                "EVMbench",
                "Blockchain Vulnerability",
                "Exploitation Tool"
            ]
        },
        {
            "date": "2026-02-18",
            "title": "Palo Alto Networks to Acquire Koi Security to Advance Agentic Endpoint Protection - Cyber Press",
            "category": "Undetermined (Access Forbidden)",
            "summary": "The provided article content returned a \"403 - Forbidden\" error, indicating that access to the page was denied. Consequently, no technical analysis regarding a specific exploit, CVE, or its impact can be performed from the given input.",
            "link": "https:\/\/cyberpress.org\/palo-alto-networks-to-acquire-koi-security-to-advance-agentic-endpoint-protection\/",
            "keywords": [
                "403 Forbidden",
                "Content Inaccessible",
                "Analysis Unavailable"
            ]
        },
        {
            "date": "2026-02-18",
            "title": "Copilot Chat bug bypasses DLP on 'Confidential' email - theregister.com",
            "category": "Data Leak",
            "summary": "Microsoft 365 Copilot Chat was found to bypass Data Loss Prevention (DLP) policies, summarizing emails with \"confidential\" sensitivity labels and exposing protected content. This vulnerability, tracked as CW1226324, stemmed from a code issue allowing Copilot to access emails in Draft and Sent folders despite configured restrictions, leading to unintended information disclosure within the chat interface.",
            "link": "https:\/\/www.theregister.com\/2026\/02\/18\/microsoft_copilot_data_loss_prevention\/",
            "keywords": [
                "Microsoft 365 Copilot Chat",
                "Data Loss Prevention",
                "CW1226324"
            ]
        },
        {
            "date": "2026-02-18",
            "title": "Critical Log Poisoning Vulnerability in OpenClaw AI Allows Content Manipulation - Cyber Press",
            "category": "Vulnerability",
            "summary": "A critical log poisoning vulnerability has been identified within the OpenClaw AI platform. This flaw specifically allows for unauthorized content manipulation, potentially through the injection of malicious data into log files.",
            "link": "https:\/\/cyberpress.org\/log-poisoning-vulnerability\/",
            "keywords": [
                "Log Poisoning",
                "OpenClaw AI",
                "Content Manipulation"
            ]
        },
        {
            "date": "2025-10-02",
            "title": "Practical LLM Security Advice from the NVIDIA AI Red Team - NVIDIA Developer",
            "category": "Vulnerability",
            "summary": "The NVIDIA AI Red Team identifies critical vulnerabilities in LLM applications, including remote code execution (RCE) via prompt injection when executing unsandboxed LLM-generated code. Further risks involve data leakage and indirect prompt injection through insecure access controls in Retrieval-Augmented Generation (RAG) data sources, and data exfiltration facilitated by active content rendering in LLM outputs.",
            "link": "https:\/\/developer.nvidia.com\/blog\/practical-llm-security-advice-from-the-nvidia-ai-red-team\/",
            "keywords": [
                "Remote Code Execution",
                "Prompt Injection",
                "Retrieval-Augmented Generation"
            ]
        },
        {
            "date": "2026-02-18",
            "title": "LLM-generated passwords 'fundamentally weak,' experts say - theregister.com",
            "category": "Vulnerability",
            "summary": "LLM-generated passwords from tools like Claude, ChatGPT, and Gemini are \"fundamentally weak\" due to inherent patterns that make them highly predictable and easily guessable, despite appearing complex. Research indicates these passwords have significantly lower entropy (20-27 bits) compared to truly random ones, allowing them to be brute-forced in a matter of hours, potentially ushering in a new era of password brute-forcing.",
            "link": "https:\/\/www.theregister.com\/2026\/02\/18\/generating_passwords_with_llms\/",
            "keywords": [
                "LLM-generated passwords",
                "Password entropy",
                "Brute-force attack"
            ]
        },
        {
            "date": "2026-01-29",
            "title": "OpenClaw (formerly Moltbot, Clawdbot) May Signal the Next AI Security Crisis - Palo Alto Networks",
            "category": "Vulnerability",
            "summary": "The autonomous AI agent OpenClaw, with its deep system access and persistent memory, significantly expands the attack surface for AI agents, enabling sophisticated, delayed, and stateful attacks. Its architecture allows for indirect prompt injection, memory poisoning, and other advanced threats, mapping to multiple OWASP Top 10 for Agentic Applications risks due to the lack of trust boundaries and human-in-the-loop controls.",
            "link": "https:\/\/www.paloaltonetworks.com\/blog\/network-security\/why-moltbot-may-signal-ai-crisis\/",
            "keywords": [
                "AI Agents",
                "Prompt Injection",
                "Persistent Memory"
            ]
        },
        {
            "date": "2026-02-19",
            "title": "Microsoft 365 Copilot Vulnerability Exposes Sensitive Emails to AI Summarization - Cyber Press",
            "category": "Vulnerability",
            "summary": "A reported vulnerability in Microsoft 365 Copilot could lead to the exposure of sensitive email content through its AI summarization feature. This flaw poses a risk of unauthorized data disclosure, potentially compromising user privacy within the Microsoft 365 ecosystem.",
            "link": "https:\/\/cyberpress.org\/microsoft-365-copilot-vulnerability-exposes-sensitive-emails-to-ai-summarization\/",
            "keywords": [
                "Microsoft 365 Copilot",
                "AI Summarization",
                "Data Exposure"
            ]
        },
        {
            "date": "2025-05-30",
            "title": "Linux Zero-Day Vulnerability Discovered Using Frontier AI - Bank Info Security",
            "category": "Vulnerability",
            "summary": "A remotely exploitable zero-day vulnerability, CVE-2025-37899, has been discovered in the Linux kernel's Server Message Block (SMB) protocol using OpenAI's o3 model. This critical flaw is identified as a use-after-free bug in the SMB 'logoff' command handler, allowing an object to be freed while still accessible by another thread.",
            "link": "https:\/\/www.bankinfosecurity.com\/linux-zero-day-vulnerability-discovered-using-frontier-ai-a-28559",
            "keywords": [
                "CVE-2025-37899",
                "Use-after-free",
                "SMB Protocol"
            ]
        },
        {
            "date": "2025-06-24",
            "title": "Data Poisoning: Current Trends and Recommended Defense Strategies - wiz.io",
            "category": "Vulnerability",
            "summary": "Data poisoning is an adversarial attack that manipulates AI and machine learning model training datasets by injecting, modifying, or deleting data to degrade model performance or induce specific malicious behaviors. This can lead to critical impacts such as biased decision-making, compromised predictive accuracy, system failures, and the creation of backdoor vulnerabilities within AI-driven systems.",
            "link": "https:\/\/www.wiz.io\/academy\/ai-security\/data-poisoning",
            "keywords": [
                "Data Poisoning",
                "Machine Learning Models",
                "Backdoor Attacks"
            ]
        },
        {
            "date": "2025-07-14",
            "title": "Google Gemini AI Bug Allows Invisible, Malicious Prompts - Dark Reading",
            "category": "Vulnerability",
            "summary": "A prompt-injection vulnerability in Google Gemini allows attackers to embed invisible, malicious instructions within emails that the AI prioritizes and executes during summarization. This flaw leads to the AI generating fabricated security alerts, facilitating sophisticated phishing and vishing attacks against users.",
            "link": "https:\/\/www.darkreading.com\/remote-workforce\/google-gemini-ai-bug-invisible-malicious-prompts",
            "keywords": [
                "Prompt Injection",
                "Google Gemini",
                "Phishing"
            ]
        },
        {
            "date": "2025-10-14",
            "title": "Picus Security uses AI to turn threat intelligence into attack simulations - Help Net Security",
            "category": "Vulnerability",
            "summary": "Picus Security has launched new AI-powered Breach and Attack Simulation (BAS) capabilities within its security validation platform. This innovation leverages multi-agent orchestration and conversational AI to convert live threat intelligence into runnable, MITRE ATT&CK-mapped attack simulations, enabling rapid validation of security controls and proactive risk reduction.",
            "link": "https:\/\/www.helpnetsecurity.com\/2025\/10\/14\/picus-security-validation-platform-bas\/",
            "keywords": [
                "Breach and Attack Simulation",
                "Threat Intelligence",
                "MITRE ATT&CK"
            ]
        },
        {
            "date": "2025-09-22",
            "title": "\u26a1 Weekly Recap: Chrome 0-Day, AI Hacking Tools, DDR5 Bit-Flips, npm Worm & More - The Hacker News",
            "category": "Vulnerability",
            "summary": "Google has released emergency security updates for Chrome to patch CVE-2025-10585, an actively exploited zero-day vulnerability. This critical type confusion flaw resides within the V8 JavaScript and WebAssembly engine, confirmed to be exploited in the wild.",
            "link": "https:\/\/thehackernews.com\/2025\/09\/weekly-recap-chrome-0-day-ai-hacking.html",
            "keywords": [
                "CVE-2025-10585",
                "Zero-Day",
                "Type Confusion"
            ]
        },
        {
            "date": "2025-12-10",
            "title": "Mistaking AI vulnerability could lead to large-scale breaches, NCSC warns - National Cyber Security Centre - NCSC.GOV.UK",
            "category": "Vulnerability",
            "summary": "The National Cyber Security Centre (NCSC) has issued a warning concerning the critical risk of large-scale breaches resulting from the misidentification or misunderstanding of AI vulnerabilities. This highlights the urgent need for robust security analysis and mitigation strategies specifically tailored to artificial intelligence systems to prevent significant security incidents.",
            "link": "https:\/\/www.ncsc.gov.uk\/news\/mistaking-ai-vulnerability-could-lead-to-large-scale-breaches",
            "keywords": [
                "AI Vulnerability",
                "NCSC",
                "Large-scale breaches"
            ]
        },
        {
            "date": "2025-07-15",
            "title": "Preventing Zero-Click AI Threats: Insights from EchoLeak - www.trendmicro.com",
            "category": "Data Leak",
            "summary": "EchoLeak (CVE-2025-32711) is a zero-click AI vulnerability that exploits Microsoft 365 Copilot's retrieval-augmented generation (RAG) capabilities. It leverages invisible prompt injections embedded in contextual data to silently exfiltrate sensitive information without user interaction.",
            "link": "https:\/\/www.trendmicro.com\/en_us\/research\/25\/g\/preventing-zero-click-ai-threats-insights-from-echoleak.html",
            "keywords": [
                "CVE-2025-32711",
                "Prompt Injection",
                "Microsoft 365 Copilot"
            ]
        },
        {
            "date": "2025-10-08",
            "title": "How Your AI Chatbot Can Become a Backdoor - www.trendmicro.com",
            "category": "Vulnerability",
            "summary": "An attack chain on an AI chatbot demonstrated how indirect prompt injection (OWASP LLM01:2025) and system prompt leakage (OWASP LLM07:2025) can be leveraged. These vulnerabilities allowed attackers to exploit excessive agency and improper output handling (OWASP LLM05:2025), leading to remote code execution, lateral movement, and exfiltration of sensitive data and proprietary AI models.",
            "link": "https:\/\/www.trendmicro.com\/en_us\/research\/25\/j\/ai-chatbot-backdoor.html",
            "keywords": [
                "OWASP LLM01:2025",
                "OWASP LLM07:2025",
                "Remote Code Execution"
            ]
        },
        {
            "date": "2026-02-11",
            "title": "Is a secure AI assistant possible? - MIT Technology Review",
            "category": "Jailbreak",
            "summary": "The article highlights significant security risks posed by AI personal assistants like OpenClaw, primarily focusing on prompt injection as a key vulnerability. This exploit allows attackers to effectively hijack Large Language Models (LLMs) by embedding malicious text in data, potentially leading to unauthorized data access, arbitrary command execution, or system compromise.",
            "link": "https:\/\/www.technologyreview.com\/2026\/02\/11\/1132768\/is-a-secure-ai-assistant-possible\/",
            "keywords": [
                "Prompt Injection",
                "Large Language Model",
                "AI Agent"
            ]
        },
        {
            "date": "2026-02-16",
            "title": "Exploited React2Shell Flaw By LLM-generated Malware Foreshadows Shift in Threat Landscape - Security Boulevard",
            "category": "Malware",
            "summary": "Exploitation of the React2Shell vulnerability against a Docker honeypot demonstrated how LLM-generated malware can rapidly enable low-skilled actors to deploy intrusion frameworks and execute custom Python payloads. This event foreshadows a future where AI-powered automation decreases the barrier for creating sophisticated and obfuscated malware, demanding a shift towards continuous monitoring and AI-tuned defensive strategies.",
            "link": "https:\/\/securityboulevard.com\/2026\/02\/exploited-react2shell-flaw-by-llm-generated-malware-foreshadows-shift-in-threat-landscape\/",
            "keywords": [
                "React2Shell",
                "LLM-generated Malware",
                "Docker Honeypot"
            ]
        },
        {
            "date": "2026-02-16",
            "title": "OpenClaw Founder Makes High-Profile Move to OpenAI - Cyber Press",
            "category": "Vulnerability",
            "summary": "The scraping attempt resulted in an HTTP 403 Forbidden error, indicating denied access to the intended article content. This incident highlights an enforced access control mechanism, potentially a web scraping protection or a server-side misconfiguration, preventing information retrieval.",
            "link": "https:\/\/cyberpress.org\/openclaw-founder-makes-high-profile-move-to-openai\/",
            "keywords": [
                "HTTP 403 Forbidden",
                "Access Control",
                "Web Scraping Protection"
            ]
        },
        {
            "date": "2026-02-13",
            "title": "The OpenClaw experiment is a warning shot for enterprise AI security - Sophos",
            "category": "Jailbreak",
            "summary": "The OpenClaw experiment serves as a critical demonstration of potential security flaws in enterprise AI systems, highlighting methods to circumvent the intended safety mechanisms of AI models. This research acts as a warning, indicating that AI systems can be manipulated to produce unintended outputs or bypass critical controls.",
            "link": "https:\/\/news.google.com\/rss\/articles\/CBMipAFBVV95cUxPM0RFOVNSbDNvbHFBR09scFFGb3JyQVF1OXNvbmExZWwzMGVoaUdtZEhZbVE2S1JfaUtEQ1RnUk1xT21qLVJmaHpIbW9QTkk3XzJDR1M4ZXVGTlQ2U0hFZEtXZC1FbnFzN3ZDbWZSTUVobV9TRU90SHQ1N2JvZnBtMm1qQmZNWnZJaURLYlcxbG14Znl2bHNFaTJwc3BVWnViMnVJVA?oc=5",
            "keywords": [
                "OpenClaw",
                "AI Security",
                "Prompt Injection"
            ]
        },
        {
            "date": "2025-11-26",
            "title": "Agentic AI Security: What It Is and How to Do It - Palo Alto Networks",
            "category": "Vulnerability",
            "summary": "Agentic AI systems introduce new architectural risks due to their autonomous planning, decision-making, and tool-use capabilities, moving the attack surface from static models to dynamic workflows. Key threats include memory poisoning, tool misuse, and potential remote code execution (RCE) stemming from manipulated agent reasoning paths, external interactions, and inter-agent communications.",
            "link": "https:\/\/www.paloaltonetworks.com\/cyberpedia\/what-is-agentic-ai-security",
            "keywords": [
                "Agentic AI",
                "OWASP Agentic AI Threats",
                "Memory Poisoning"
            ]
        },
        {
            "date": "2025-08-29",
            "title": "Proof-of-Concept in 15 Minutes? AI Turbocharges Exploitation - Dark Reading | Security",
            "category": "Vulnerability",
            "summary": "An AI-powered offensive research system dubbed \"Auto Exploit\" utilizes Large Language Models (LLMs), CVE advisories, and open-source patches to generate proof-of-concept exploits for software vulnerabilities in minutes. This acceleration in exploit development fundamentally challenges traditional security timelines, necessitating \"machine speed\" defense and a re-evaluation of vulnerability prioritization based on system reachability.",
            "link": "https:\/\/www.darkreading.com\/vulnerabilities-threats\/proof-concept-15-minutes-ai-turbocharges-exploitation",
            "keywords": [
                "LLM",
                "Exploit Generation",
                "CVE"
            ]
        }
    ]
}